From 179e4ee701a363706040457713d58d8fd6751190 Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 12:52:24 -0400 Subject: [PATCH 01/10] refactor: deduplicate CosineSimilarity into shared mathutil package Extract canonical CosineSimilarity to internal/mathutil with float64 accumulation for better numerical precision. Remove 3 duplicate implementations from retrieval agent, sqlite store, and main.go. agentutil.CosineSimilarity now delegates to mathutil. Addresses finding #1 from yield audit (issue #355). Co-Authored-By: Claude Opus 4.6 (1M context) --- cmd/mnemonic/dedup_test.go | 6 ++++-- cmd/mnemonic/main.go | 21 +++--------------- internal/agent/agentutil/math.go | 19 ++-------------- internal/agent/retrieval/agent.go | 20 ++--------------- internal/agent/retrieval/diversity_test.go | 5 +++-- internal/mathutil/cosine.go | 25 ++++++++++++++++++++++ internal/store/sqlite/abstractions.go | 3 ++- internal/store/sqlite/patterns.go | 6 +++--- internal/store/sqlite/sqlite.go | 19 ---------------- 9 files changed, 44 insertions(+), 80 deletions(-) create mode 100644 internal/mathutil/cosine.go diff --git a/cmd/mnemonic/dedup_test.go b/cmd/mnemonic/dedup_test.go index 187b681a..43f27cb9 100644 --- a/cmd/mnemonic/dedup_test.go +++ b/cmd/mnemonic/dedup_test.go @@ -3,6 +3,8 @@ package main import ( "math" "testing" + + "github.com/appsprout-dev/mnemonic/internal/agent/agentutil" ) func TestCosineSim(t *testing.T) { @@ -21,9 +23,9 @@ func TestCosineSim(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := cosineSim(tt.a, tt.b) + got := agentutil.CosineSimilarity(tt.a, tt.b) if math.Abs(float64(got-tt.want)) > 0.01 { - t.Errorf("cosineSim() = %v, want %v", got, tt.want) + t.Errorf("CosineSimilarity() = %v, want %v", got, tt.want) } }) } diff --git a/cmd/mnemonic/main.go b/cmd/mnemonic/main.go index 75c02dd0..ed7010f7 100644 --- a/cmd/mnemonic/main.go +++ b/cmd/mnemonic/main.go @@ -20,6 +20,7 @@ import ( "syscall" "time" + "github.com/appsprout-dev/mnemonic/internal/agent/agentutil" "github.com/appsprout-dev/mnemonic/internal/config" "github.com/appsprout-dev/mnemonic/internal/daemon" "github.com/appsprout-dev/mnemonic/internal/events" @@ -3101,7 +3102,7 @@ func dedupCommand(configPath string, dryRun bool) { comparisons := 0 for i := 0; i < len(withEmbeddings); i++ { for j := i + 1; j < len(withEmbeddings); j++ { - sim := cosineSim(withEmbeddings[i].Embedding, withEmbeddings[j].Embedding) + sim := agentutil.CosineSimilarity(withEmbeddings[i].Embedding, withEmbeddings[j].Embedding) comparisons++ if sim >= threshold { union(withEmbeddings[i].ID, withEmbeddings[j].ID) @@ -3301,7 +3302,7 @@ func resetPatternsCommand(configPath string, dryRun bool) { for ai := 0; ai < len(active); ai++ { for bi := ai + 1; bi < len(active); bi++ { i, j := active[ai], active[bi] - sim := cosineSim(patterns[i].Embedding, patterns[j].Embedding) + sim := agentutil.CosineSimilarity(patterns[i].Embedding, patterns[j].Embedding) if sim >= mergeThreshold { ri, rj := findRoot(i), findRoot(j) if ri != rj { @@ -3384,22 +3385,6 @@ func resetPatternsCommand(configPath string, dryRun bool) { } } -// cosineSim computes cosine similarity between two float32 vectors. 
-func cosineSim(a, b []float32) float32 { - if len(a) != len(b) || len(a) == 0 { - return 0 - } - var dot, na, nb float64 - for i := range a { - dot += float64(a[i]) * float64(b[i]) - na += float64(a[i]) * float64(a[i]) - nb += float64(b[i]) * float64(b[i]) - } - if na == 0 || nb == 0 { - return 0 - } - return float32(dot / (math.Sqrt(na) * math.Sqrt(nb))) -} // truncate shortens a string to maxLen with ellipsis. func truncate(s string, maxLen int) string { diff --git a/internal/agent/agentutil/math.go b/internal/agent/agentutil/math.go index 1ec6ff8e..932a221c 100644 --- a/internal/agent/agentutil/math.go +++ b/internal/agent/agentutil/math.go @@ -1,26 +1,11 @@ package agentutil -import "math" +import "github.com/appsprout-dev/mnemonic/internal/mathutil" // CosineSimilarity computes cosine similarity between two embedding vectors. // Returns 0 if vectors are different lengths, empty, or have zero magnitude. func CosineSimilarity(a, b []float32) float32 { - if len(a) != len(b) || len(a) == 0 { - return 0 - } - - var dotProduct, normA, normB float32 - for i := range a { - dotProduct += a[i] * b[i] - normA += a[i] * a[i] - normB += b[i] * b[i] - } - - if normA == 0 || normB == 0 { - return 0 - } - - return dotProduct / (float32(math.Sqrt(float64(normA))) * float32(math.Sqrt(float64(normB)))) + return mathutil.CosineSimilarity(a, b) } // AverageVectors computes the element-wise average of a set of float32 vectors. diff --git a/internal/agent/retrieval/agent.go b/internal/agent/retrieval/agent.go index fc86ac09..13614b6f 100644 --- a/internal/agent/retrieval/agent.go +++ b/internal/agent/retrieval/agent.go @@ -11,6 +11,7 @@ import ( "sync" "time" + "github.com/appsprout-dev/mnemonic/internal/agent/agentutil" "github.com/appsprout-dev/mnemonic/internal/concepts" "github.com/appsprout-dev/mnemonic/internal/events" "github.com/appsprout-dev/mnemonic/internal/llm" @@ -1213,23 +1214,6 @@ func hasAnyConcept(memoryConcepts, excluded []string) bool { return false } -// cosineSimilarity computes the cosine similarity between two embedding vectors. -// Returns 0.0 if either vector is empty or has zero magnitude. -func cosineSimilarity(a, b []float32) float32 { - if len(a) != len(b) || len(a) == 0 { - return 0.0 - } - var dot, normA, normB float64 - for i := range a { - dot += float64(a[i]) * float64(b[i]) - normA += float64(a[i]) * float64(a[i]) - normB += float64(b[i]) * float64(b[i]) - } - if normA == 0 || normB == 0 { - return 0.0 - } - return float32(dot / (math.Sqrt(normA) * math.Sqrt(normB))) -} // applyDiversityFilter reranks results using Maximal Marginal Relevance (MMR). 
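// (As a sketch, the rule implemented below is the standard MMR blend,
// mmr = lambda*normScore - (1-lambda)*maxSimToSelected, with lambda and
// the similarity cutoff read from config; defaults 0.7 and 0.85.)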
// It iteratively selects results that balance relevance (original score) against @@ -1279,7 +1263,7 @@ func (ra *RetrievalAgent) applyDiversityFilter(results []store.RetrievalResult) if len(sel.Memory.Embedding) == 0 { continue } - sim := cosineSimilarity(candidate.Memory.Embedding, sel.Memory.Embedding) + sim := agentutil.CosineSimilarity(candidate.Memory.Embedding, sel.Memory.Embedding) if sim > maxSim { maxSim = sim } diff --git a/internal/agent/retrieval/diversity_test.go b/internal/agent/retrieval/diversity_test.go index 62c9ec05..1452fcb9 100644 --- a/internal/agent/retrieval/diversity_test.go +++ b/internal/agent/retrieval/diversity_test.go @@ -6,6 +6,7 @@ import ( "os" "testing" + "github.com/appsprout-dev/mnemonic/internal/agent/agentutil" "github.com/appsprout-dev/mnemonic/internal/store" ) @@ -61,9 +62,9 @@ func TestCosineSimilarity(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := cosineSimilarity(tt.a, tt.b) + got := agentutil.CosineSimilarity(tt.a, tt.b) if math.Abs(float64(got-tt.want)) > 0.01 { - t.Errorf("cosineSimilarity() = %v, want %v", got, tt.want) + t.Errorf("CosineSimilarity() = %v, want %v", got, tt.want) } }) } diff --git a/internal/mathutil/cosine.go b/internal/mathutil/cosine.go new file mode 100644 index 00000000..4bef7065 --- /dev/null +++ b/internal/mathutil/cosine.go @@ -0,0 +1,25 @@ +package mathutil + +import "math" + +// CosineSimilarity computes cosine similarity between two embedding vectors. +// Accumulates in float64 for numerical precision with high-dimensional embeddings. +// Returns 0 if vectors are different lengths, empty, or have zero magnitude. +func CosineSimilarity(a, b []float32) float32 { + if len(a) != len(b) || len(a) == 0 { + return 0 + } + + var dot, normA, normB float64 + for i := range a { + dot += float64(a[i]) * float64(b[i]) + normA += float64(a[i]) * float64(a[i]) + normB += float64(b[i]) * float64(b[i]) + } + + if normA == 0 || normB == 0 { + return 0 + } + + return float32(dot / (math.Sqrt(normA) * math.Sqrt(normB))) +} diff --git a/internal/store/sqlite/abstractions.go b/internal/store/sqlite/abstractions.go index 42e870b8..a72a757c 100644 --- a/internal/store/sqlite/abstractions.go +++ b/internal/store/sqlite/abstractions.go @@ -7,6 +7,7 @@ import ( "sort" "time" + "github.com/appsprout-dev/mnemonic/internal/mathutil" store "github.com/appsprout-dev/mnemonic/internal/store" ) @@ -156,7 +157,7 @@ func (s *SQLiteStore) SearchAbstractionsByEmbedding(ctx context.Context, embeddi if len(emb) == 0 { continue } - score := cosineSimilarity(embedding, emb) + score := mathutil.CosineSimilarity(embedding, emb) candidates = append(candidates, candidate{id: id, score: score}) } diff --git a/internal/store/sqlite/patterns.go b/internal/store/sqlite/patterns.go index f4387b42..7d26a72e 100644 --- a/internal/store/sqlite/patterns.go +++ b/internal/store/sqlite/patterns.go @@ -7,6 +7,7 @@ import ( "sort" "time" + "github.com/appsprout-dev/mnemonic/internal/mathutil" store "github.com/appsprout-dev/mnemonic/internal/store" ) @@ -154,7 +155,7 @@ func (s *SQLiteStore) SearchPatternsByEmbedding(ctx context.Context, embedding [ if len(emb) == 0 { continue } - score := cosineSimilarity(embedding, emb) + score := mathutil.CosineSimilarity(embedding, emb) candidates = append(candidates, candidate{id: id, score: score}) } @@ -292,7 +293,7 @@ func (s *SQLiteStore) SearchPatternsByEmbeddingInProject(ctx context.Context, em if len(emb) == 0 { continue } - score := cosineSimilarity(embedding, emb) + score := 
mathutil.CosineSimilarity(embedding, emb) candidates = append(candidates, candidate{id: id, score: score}) } @@ -340,4 +341,3 @@ func (s *SQLiteStore) ArchiveAllPatterns(ctx context.Context) (int, error) { return int(n), nil } -// cosineSimilarity and sqrt32 are defined in embindex.go diff --git a/internal/store/sqlite/sqlite.go b/internal/store/sqlite/sqlite.go index 0b6f610d..ecd20f08 100644 --- a/internal/store/sqlite/sqlite.go +++ b/internal/store/sqlite/sqlite.go @@ -2573,25 +2573,6 @@ func boolToInt(b bool) int { return 0 } -// cosineSimilarity computes the cosine similarity between two embedding vectors. -func cosineSimilarity(a, b []float32) float32 { - if len(a) != len(b) || len(a) == 0 { - return 0 - } - - var dotProduct, normA, normB float32 - for i := 0; i < len(a); i++ { - dotProduct += a[i] * b[i] - normA += a[i] * a[i] - normB += b[i] * b[i] - } - - if normA == 0 || normB == 0 { - return 0 - } - - return dotProduct / (float32(math.Sqrt(float64(normA))) * float32(math.Sqrt(float64(normB)))) -} // --- MCP tool usage tracking --- From c8848cdf425b25148a375bffb0f9740068d404da Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 12:54:05 -0400 Subject: [PATCH 02/10] fix: add cancellation to HeuristicFilter cleanup goroutine Add done channel and Close() method to HeuristicFilter so the cleanupLoop goroutine can be stopped gracefully. Call Close() in the perception agent's Stop() method. Addresses finding #2 from yield audit (issue #355). Co-Authored-By: Claude Opus 4.6 (1M context) --- internal/agent/perception/agent.go | 5 +++++ internal/agent/perception/heuristic.go | 17 +++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/internal/agent/perception/agent.go b/internal/agent/perception/agent.go index aa2af42b..79ea0fc8 100644 --- a/internal/agent/perception/agent.go +++ b/internal/agent/perception/agent.go @@ -202,6 +202,11 @@ func (pa *PerceptionAgent) Stop() error { // Wait for all processing goroutines to finish pa.processingWg.Wait() + // Stop the heuristic filter's cleanup goroutine + if pa.heuristicFilter != nil { + pa.heuristicFilter.Close() + } + pa.log.Info("perception agent stopped") return nil } diff --git a/internal/agent/perception/heuristic.go b/internal/agent/perception/heuristic.go index e72b0e7d..dc673b12 100644 --- a/internal/agent/perception/heuristic.go +++ b/internal/agent/perception/heuristic.go @@ -109,6 +109,8 @@ type HeuristicFilter struct { // Recall-aware salience: files recently recalled via MCP get a boost recalledFiles map[string]time.Time // path -> last recall time recallMu sync.RWMutex + + done chan struct{} // signals cleanupLoop to exit } // recentEdit tracks a file edit for batch detection. @@ -125,6 +127,7 @@ func NewHeuristicFilter(cfg HeuristicConfig, log *slog.Logger) *HeuristicFilter log: log, frequency: make(map[string][]frequencyEntry), recalledFiles: make(map[string]time.Time), + done: make(chan struct{}), } // Start a cleanup goroutine to periodically remove old entries @@ -133,13 +136,23 @@ func NewHeuristicFilter(cfg HeuristicConfig, log *slog.Logger) *HeuristicFilter return hf } +// Close stops the cleanup goroutine. Call this when the filter is no longer needed. +func (h *HeuristicFilter) Close() { + close(h.done) +} + // cleanupLoop periodically removes frequency entries older than the window. 
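// It exits once Close is called; a minimal owner sketch (illustrative):
//
//	hf := NewHeuristicFilter(cfg, log)
//	defer hf.Close()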
func (h *HeuristicFilter) cleanupLoop() { ticker := time.NewTicker(5 * time.Minute) defer ticker.Stop() - for range ticker.C { - h.cleanup() + for { + select { + case <-ticker.C: + h.cleanup() + case <-h.done: + return + } } } From 6a77042aba014e9c20530e7fc965e4fdcb0ae5bc Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 12:54:45 -0400 Subject: [PATCH 03/10] fix: cancel constructor context in encoding agent Start() The encoding agent constructor creates a default context for test-friendliness. Start() replaces it but never cancelled the original, leaking a context. Now cancels the old context before creating the new one. Addresses finding #3 from yield audit (issue #355). Co-Authored-By: Claude Opus 4.6 (1M context) --- internal/agent/encoding/agent.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/agent/encoding/agent.go b/internal/agent/encoding/agent.go index fd6b1c5a..a77cf0b1 100644 --- a/internal/agent/encoding/agent.go +++ b/internal/agent/encoding/agent.go @@ -333,6 +333,10 @@ func (ea *EncodingAgent) Name() string { // Start begins the encoding agent's work. // It subscribes to RawMemoryCreated events and starts a polling fallback loop. func (ea *EncodingAgent) Start(ctx context.Context, bus events.Bus) error { + // Cancel the constructor's default context before replacing it + if ea.cancel != nil { + ea.cancel() + } ea.ctx, ea.cancel = context.WithCancel(ctx) ea.bus = bus From acf93244dc5c56f99d3291d54996c6868b6177d0 Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 12:57:38 -0400 Subject: [PATCH 04/10] refactor: consolidate config fallback helpers into agentutil Extract IntOr, Float32Or, Float64Or into agentutil/config.go. Remove duplicate cfgFloat64/cfgFloat32/cfgInt from consolidation agent and intOr/f32Or from retrieval agent. Addresses finding #7 from yield audit (issue #355). Co-Authored-By: Claude Opus 4.6 (1M context) --- internal/agent/agentutil/config.go | 25 +++++++++++ internal/agent/consolidation/agent.go | 65 +++++++++------------------ internal/agent/retrieval/agent.go | 60 ++++++++++--------------- 3 files changed, 69 insertions(+), 81 deletions(-) create mode 100644 internal/agent/agentutil/config.go diff --git a/internal/agent/agentutil/config.go b/internal/agent/agentutil/config.go new file mode 100644 index 00000000..e2d2cb27 --- /dev/null +++ b/internal/agent/agentutil/config.go @@ -0,0 +1,25 @@ +package agentutil + +// IntOr returns val if non-zero, else fallback. +func IntOr(val, fallback int) int { + if val != 0 { + return val + } + return fallback +} + +// Float32Or returns val if non-zero, else fallback. +func Float32Or(val, fallback float32) float32 { + if val != 0 { + return val + } + return fallback +} + +// Float64Or returns val if non-zero, else fallback. +func Float64Or(val, fallback float64) float64 { + if val != 0 { + return val + } + return fallback +} diff --git a/internal/agent/consolidation/agent.go b/internal/agent/consolidation/agent.go index 2bff64a7..5ffb6fc7 100644 --- a/internal/agent/consolidation/agent.go +++ b/internal/agent/consolidation/agent.go @@ -103,29 +103,6 @@ func DefaultConfig() ConsolidationConfig { } } -// cfgFloat64 returns val if non-zero, else fallback. -func cfgFloat64(val, fallback float64) float64 { - if val != 0 { - return val - } - return fallback -} - -// cfgFloat32 returns val if non-zero, else fallback. -func cfgFloat32(val, fallback float32) float32 { - if val != 0 { - return val - } - return fallback -} - -// cfgInt returns val if non-zero, else fallback. 
-func cfgInt(val, fallback int) int { - if val != 0 { - return val - } - return fallback -} // ConsolidationAgent performs periodic memory consolidation — the "sleeping brain." // Each cycle: decay salience → transition states → prune associations → merge clusters → delete expired. @@ -435,14 +412,14 @@ func (ca *ConsolidationAgent) decaySalience(ctx context.Context) (decayed, proce // Recency protection: recently accessed memories use reduced decay exponent recencyFactor := 1.0 if hoursSinceAccess < 24 { - recencyFactor = cfgFloat64(ca.config.RecencyProtection24h, 0.8) + recencyFactor = agentutil.Float64Or(ca.config.RecencyProtection24h, 0.8) } else if hoursSinceAccess < 168 { // 7 days - recencyFactor = cfgFloat64(ca.config.RecencyProtection168h, 0.9) + recencyFactor = agentutil.Float64Or(ca.config.RecencyProtection168h, 0.9) } // Access count bonus: frequently accessed memories resist decay - resistScale := cfgFloat64(ca.config.AccessResistanceScale, 0.02) - resistCap := cfgFloat64(ca.config.AccessResistanceCap, 0.3) + resistScale := agentutil.Float64Or(ca.config.AccessResistanceScale, 0.02) + resistCap := agentutil.Float64Or(ca.config.AccessResistanceCap, 0.3) accessBonus := 1.0 - math.Min(float64(mem.AccessCount)*resistScale, resistCap) // Apply decay: new_salience = old * decay_rate^(recency * access_factor) @@ -607,7 +584,7 @@ func (ca *ConsolidationAgent) findClusters(memories []store.Memory) [][]store.Me return nil } - similarityThreshold := float32(cfgFloat64(ca.config.MergeSimilarityThreshold, 0.85)) + similarityThreshold := float32(agentutil.Float64Or(ca.config.MergeSimilarityThreshold, 0.85)) used := make(map[string]bool) var clusters [][]store.Memory @@ -876,7 +853,7 @@ func (ca *ConsolidationAgent) extractPatterns(ctx context.Context) (int, error) // processPatternClusters handles the common logic for evaluating a set of memory clusters // as potential patterns: strengthening existing matches or identifying new ones via LLM. 
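// As a worked sketch of the strengthening math below, using the defaults:
// 3 new evidence memories give increment = 0.03 * log2(1+3) = 0.06, a
// cluster of 5+ multiplies that by 1.3, and the result is capped at 0.15.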
func (ca *ConsolidationAgent) processPatternClusters(ctx context.Context, clusters [][]store.Memory, project string, budget int) int { - minSalience := cfgFloat32(ca.config.MinEvidenceSalience, 0.5) + minSalience := agentutil.Float32Or(ca.config.MinEvidenceSalience, 0.5) extracted := 0 for _, cluster := range clusters { if extracted >= budget { @@ -907,18 +884,18 @@ func (ca *ConsolidationAgent) processPatternClusters(ctx context.Context, cluste } if newEvidence > 0 { // Scale strength increment logarithmically to prevent saturation with large evidence counts - increment := cfgFloat32(ca.config.PatternStrengthIncrement, 0.03) * float32(math.Log2(1+float64(newEvidence))) - if len(cluster) >= cfgInt(ca.config.LargeClusterMinSize, 5) { - increment *= cfgFloat32(ca.config.LargeClusterBonus, 1.3) + increment := agentutil.Float32Or(ca.config.PatternStrengthIncrement, 0.03) * float32(math.Log2(1+float64(newEvidence))) + if len(cluster) >= agentutil.IntOr(ca.config.LargeClusterMinSize, 5) { + increment *= agentutil.Float32Or(ca.config.LargeClusterBonus, 1.3) } - incrementCap := cfgFloat32(ca.config.PatternIncrementCap, 0.15) + incrementCap := agentutil.Float32Or(ca.config.PatternIncrementCap, 0.15) if increment > incrementCap { increment = incrementCap } // Cap at ceiling unless pattern has strong evidence - maxStrength := cfgFloat32(ca.config.PatternStrengthCeiling, 0.95) - if len(existing.EvidenceIDs) > cfgInt(ca.config.StrongEvidenceMinCount, 10) { - maxStrength = cfgFloat32(ca.config.StrongEvidenceCeiling, 1.0) + maxStrength := agentutil.Float32Or(ca.config.PatternStrengthCeiling, 0.95) + if len(existing.EvidenceIDs) > agentutil.IntOr(ca.config.StrongEvidenceMinCount, 10) { + maxStrength = agentutil.Float32Or(ca.config.StrongEvidenceCeiling, 1.0) } existing.Strength = min32(existing.Strength+increment, maxStrength) } @@ -1137,7 +1114,7 @@ func (ca *ConsolidationAgent) findMatchingPattern(ctx context.Context, cluster [ } // Check if the top match is close enough - threshold := float32(cfgFloat64(ca.config.PatternMatchThreshold, 0.70)) + threshold := float32(agentutil.Float64Or(ca.config.PatternMatchThreshold, 0.70)) if len(patterns[0].Embedding) > 0 { sim := agentutil.CosineSimilarity(avgEmb, patterns[0].Embedding) if sim >= threshold { @@ -1569,26 +1546,26 @@ func (ca *ConsolidationAgent) decayPatterns(ctx context.Context) (int, error) { } // Apply baseline decay — self-sustaining requires healthy evidence - minEvidence := cfgInt(ca.config.SelfSustainingMinEvidence, 10) - minStrength := cfgFloat32(ca.config.SelfSustainingMinStrength, 0.9) + minEvidence := agentutil.IntOr(ca.config.SelfSustainingMinEvidence, 10) + minStrength := agentutil.Float32Or(ca.config.SelfSustainingMinStrength, 0.9) if len(p.EvidenceIDs) >= minEvidence && p.Strength >= minStrength && evidenceRatio >= 0.5 { - p.Strength *= cfgFloat32(ca.config.SelfSustainingDecay, 0.9999) + p.Strength *= agentutil.Float32Or(ca.config.SelfSustainingDecay, 0.9999) } else { - p.Strength *= cfgFloat32(ca.config.PatternBaselineDecay, 0.998) + p.Strength *= agentutil.Float32Or(ca.config.PatternBaselineDecay, 0.998) } // Evidence-based decay applies to all patterns (not just stale ones). // Patterns with dead evidence should decay regardless of access recency. 
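// Sketch of the schedule below with the configured defaults: no live
// evidence multiplies strength by 0.90 per cycle; an evidence ratio in
// [0.2, 0.5) multiplies by 0.95; a ratio below 0.2 by 0.90; a ratio of
// 0.5 or more adds nothing beyond the baseline decay applied above.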
if totalEvidence == 0 { - p.Strength *= cfgFloat32(ca.config.StaleDecayAggressive, 0.90) + p.Strength *= agentutil.Float32Or(ca.config.StaleDecayAggressive, 0.90) } else { switch { case evidenceRatio >= 0.5: // Healthy evidence — no additional decay beyond baseline case evidenceRatio >= 0.2: - p.Strength *= cfgFloat32(ca.config.StaleDecayModerate, 0.95) + p.Strength *= agentutil.Float32Or(ca.config.StaleDecayModerate, 0.95) default: - p.Strength *= cfgFloat32(ca.config.StaleDecayAggressive, 0.90) + p.Strength *= agentutil.Float32Or(ca.config.StaleDecayAggressive, 0.90) } } diff --git a/internal/agent/retrieval/agent.go b/internal/agent/retrieval/agent.go index 13614b6f..5198843f 100644 --- a/internal/agent/retrieval/agent.go +++ b/internal/agent/retrieval/agent.go @@ -131,20 +131,6 @@ func DefaultConfig() RetrievalConfig { } } -// helpers for zero-value fallback -func intOr(v, fallback int) int { - if v == 0 { - return fallback - } - return v -} - -func f32Or(v, fallback float32) float32 { - if v == 0 { - return fallback - } - return v -} // QueryRequest is the input for a retrieval query. type QueryRequest struct { @@ -212,8 +198,8 @@ func NewRetrievalAgent(s store.Store, llmProv llm.Provider, cfg RetrievalConfig, // Wire up activity-based recall boost if the event bus is available. if bus != nil { - windowMin := intOr(cfg.ContextBoostWindowMin, 30) - maxBoost := f32Or(cfg.ContextBoostMax, 0.2) + windowMin := agentutil.IntOr(cfg.ContextBoostWindowMin, 30) + maxBoost := agentutil.Float32Or(cfg.ContextBoostMax, 0.2) ra.activity = newActivityTracker(windowMin, maxBoost) bus.Subscribe(events.TypeWatcherEvent, func(ctx context.Context, event events.Event) error { we, ok := event.(events.WatcherEvent) @@ -263,8 +249,8 @@ func (ra *RetrievalAgent) SyncActivity(snap map[string]time.Time) { if ra.activity == nil { // Create a tracker on-the-fly for MCP processes that don't have a bus. 
ra.activity = newActivityTracker( - intOr(ra.config.ContextBoostWindowMin, 30), - f32Or(ra.config.ContextBoostMax, 0.2), + agentutil.IntOr(ra.config.ContextBoostWindowMin, 30), + agentutil.Float32Or(ra.config.ContextBoostMax, 0.2), ) } ra.activity.loadSnapshot(snap) @@ -298,7 +284,7 @@ func (ra *RetrievalAgent) Query(ctx context.Context, req QueryRequest) (QueryRes ra.log.Debug("query concepts extracted", "query_id", queryID, "concepts_count", len(concepts)) // Step 2: Find entry points via full-text search - ftsResults, err := ra.store.SearchByFullText(ctx, req.Query, intOr(ra.config.FTSCandidateLimit, 10)) + ftsResults, err := ra.store.SearchByFullText(ctx, req.Query, agentutil.IntOr(ra.config.FTSCandidateLimit, 10)) if err != nil { ra.log.Warn("full-text search failed", "query_id", queryID, "error", err) ftsResults = []store.Memory{} @@ -311,7 +297,7 @@ func (ra *RetrievalAgent) Query(ctx context.Context, req QueryRequest) (QueryRes if err != nil { ra.log.Warn("embedding generation failed", "query_id", queryID, "error", err) } else { - embeddingResults, err = ra.store.SearchByEmbedding(ctx, embedding, intOr(ra.config.EmbeddingCandidateLimit, 10)) + embeddingResults, err = ra.store.SearchByEmbedding(ctx, embedding, agentutil.IntOr(ra.config.EmbeddingCandidateLimit, 10)) if err != nil { ra.log.Warn("embedding search failed", "query_id", queryID, "error", err) embeddingResults = []store.RetrievalResult{} @@ -335,8 +321,8 @@ func (ra *RetrievalAgent) Query(ctx context.Context, req QueryRequest) (QueryRes entryPoints := ra.mergeEntryPoints(ftsResults, embeddingResults) // Inject time-range results as additional entry points with a moderate base score - timeBase := f32Or(ra.config.TimeRangeBaseScore, 0.3) - timeSalWt := f32Or(ra.config.TimeRangeSalienceWt, 0.2) + timeBase := agentutil.Float32Or(ra.config.TimeRangeBaseScore, 0.3) + timeSalWt := agentutil.Float32Or(ra.config.TimeRangeSalienceWt, 0.2) for _, mem := range timeRangeResults { if _, exists := entryPoints[mem.ID]; !exists { entryPoints[mem.ID] = timeBase + timeSalWt*mem.Salience @@ -380,9 +366,9 @@ func (ra *RetrievalAgent) Query(ctx context.Context, req QueryRequest) (QueryRes var patterns []store.Pattern var pErr error if req.Project != "" { - patterns, pErr = ra.store.SearchPatternsByEmbeddingInProject(ctx, embedding, req.Project, intOr(ra.config.PatternSearchLimit, 5)) + patterns, pErr = ra.store.SearchPatternsByEmbeddingInProject(ctx, embedding, req.Project, agentutil.IntOr(ra.config.PatternSearchLimit, 5)) } else { - patterns, pErr = ra.store.SearchPatternsByEmbedding(ctx, embedding, intOr(ra.config.PatternSearchLimit, 5)) + patterns, pErr = ra.store.SearchPatternsByEmbedding(ctx, embedding, agentutil.IntOr(ra.config.PatternSearchLimit, 5)) } if pErr != nil { ra.log.Warn("pattern search failed", "query_id", queryID, "error", pErr) @@ -392,7 +378,7 @@ func (ra *RetrievalAgent) Query(ctx context.Context, req QueryRequest) (QueryRes } if req.IncludeAbstractions { - abs, err := ra.store.SearchAbstractionsByEmbedding(ctx, embedding, intOr(ra.config.AbstractionSearchLimit, 5)) + abs, err := ra.store.SearchAbstractionsByEmbedding(ctx, embedding, agentutil.IntOr(ra.config.AbstractionSearchLimit, 5)) if err != nil { ra.log.Warn("abstraction search failed", "query_id", queryID, "error", err) } else { @@ -476,9 +462,9 @@ func (ra *RetrievalAgent) mergeEntryPoints(ftsResults []store.Memory, embeddingR // blended with salience as a secondary importance signal. 
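// (Worked sketch, assuming the weighted blend below with defaults 0.7/0.3:
// the top FTS hit with salience 0.5 scores 0.7*1.0 + 0.3*0.5 = 0.85, the
// second 0.7*0.5 + 0.3*0.5 = 0.50, so rank order survives the blend.)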
// Before this fix, all FTS results got ~0.49 after consolidation decay, // discarding the BM25 rank-order information entirely. - ftsRankWt := f32Or(ra.config.FTSRankWeight, 0.7) - ftsSalWt := f32Or(ra.config.FTSSalienceWeight, 0.3) - defaultSal := f32Or(ra.config.DefaultSalience, 0.5) + ftsRankWt := agentutil.Float32Or(ra.config.FTSRankWeight, 0.7) + ftsSalWt := agentutil.Float32Or(ra.config.FTSSalienceWeight, 0.3) + defaultSal := agentutil.Float32Or(ra.config.DefaultSalience, 0.5) for i, mem := range ftsResults { rankScore := float32(1.0) / float32(i+1) // reciprocal rank: 1.0, 0.5, 0.33, ... salience := mem.Salience @@ -655,7 +641,7 @@ func (ra *RetrievalAgent) rankResults(ctx context.Context, activated map[string] ra.log.Warn("failed to fetch feedback scores for ranking", "error", err) feedbackScores = nil } - feedbackWt := f32Or(ra.config.FeedbackWeight, 0.15) + feedbackWt := agentutil.Float32Or(ra.config.FeedbackWeight, 0.15) scored := make([]scoredMemory, 0, len(activated)) @@ -673,13 +659,13 @@ func (ra *RetrievalAgent) rankResults(ctx context.Context, activated map[string] } else { daysSinceAccess = float32(time.Since(mem.LastAccessed).Hours() / 24) } - recencyWt := f32Or(ra.config.RecencyBoostWeight, 0.2) - recencyHL := f32Or(ra.config.RecencyHalfLifeDays, 30) + recencyWt := agentutil.Float32Or(ra.config.RecencyBoostWeight, 0.2) + recencyHL := agentutil.Float32Or(ra.config.RecencyHalfLifeDays, 30) recencyBonus := recencyWt * float32(math.Exp(float64(-daysSinceAccess/recencyHL))) // Hebbian activity bonus — frequently traversed associations indicate relevance - actMax := float64(f32Or(ra.config.ActivityBonusMax, 0.2)) - actScale := float64(f32Or(ra.config.ActivityBonusScale, 0.02)) + actMax := float64(agentutil.Float32Or(ra.config.ActivityBonusMax, 0.2)) + actScale := float64(agentutil.Float32Or(ra.config.ActivityBonusScale, 0.02)) activityBonus := float32(math.Min(actMax, actScale*math.Log1p(float64(state.activationCount)))) // Context boost from recent watcher activity (only for eligible sources) @@ -699,9 +685,9 @@ func (ra *RetrievalAgent) rankResults(ctx context.Context, activated map[string] if attrErr == nil { switch attrs.Significance { case "critical": - baseScore *= f32Or(ra.config.CriticalBoost, 1.2) + baseScore *= agentutil.Float32Or(ra.config.CriticalBoost, 1.2) case "important": - baseScore *= f32Or(ra.config.ImportantBoost, 1.1) + baseScore *= agentutil.Float32Or(ra.config.ImportantBoost, 1.1) } } @@ -1224,8 +1210,8 @@ func (ra *RetrievalAgent) applyDiversityFilter(results []store.RetrievalResult) return results } - lambda := f32Or(ra.config.DiversityLambda, 0.7) - threshold := f32Or(ra.config.DiversityThreshold, 0.85) + lambda := agentutil.Float32Or(ra.config.DiversityLambda, 0.7) + threshold := agentutil.Float32Or(ra.config.DiversityThreshold, 0.85) // Normalize scores to [0,1] for fair MMR blending maxScore := results[0].Score // results are pre-sorted by score descending From dc836b1b8d8173e060e3d403a0053080fef6de0d Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 12:59:13 -0400 Subject: [PATCH 05/10] refactor: make CORS allowed origins configurable Add AllowedOrigins field to APIConfig and wire it through the API server. The built-in localhost defaults are used when no origins are configured, preserving current behavior. Addresses finding #4 from yield audit (issue #355). 
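Illustrative wiring, with an example origin value (any scheme://host:port
string works; this is not a default):

    apiServer := api.NewServer(api.ServerConfig{
        Host:           cfg.API.Host,
        Port:           cfg.API.Port,
        AllowedOrigins: []string{"https://dashboard.example.com"},
    }, apiDeps)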
Co-Authored-By: Claude Opus 4.6 (1M context) --- cmd/mnemonic/main.go | 1 + internal/api/routes/ws.go | 36 +++++++++++++++++++++++++++++------- internal/api/server.go | 4 +++- internal/config/config.go | 9 +++++---- 4 files changed, 38 insertions(+), 12 deletions(-) diff --git a/cmd/mnemonic/main.go b/cmd/mnemonic/main.go index ed7010f7..cc26a4fc 100644 --- a/cmd/mnemonic/main.go +++ b/cmd/mnemonic/main.go @@ -1776,6 +1776,7 @@ func serveCommand(configPath string) { Port: cfg.API.Port, RequestTimeoutSec: cfg.API.RequestTimeoutSec, Token: cfg.API.Token, + AllowedOrigins: cfg.API.AllowedOrigins, }, apiDeps) if err := apiServer.Start(); err != nil { diff --git a/internal/api/routes/ws.go b/internal/api/routes/ws.go index 6e7689e2..8c0606d5 100644 --- a/internal/api/routes/ws.go +++ b/internal/api/routes/ws.go @@ -30,15 +30,37 @@ type wsConn struct { log *slog.Logger } +// defaultOrigins is the built-in set of allowed CORS/WebSocket origins. +var defaultOrigins = []string{ + "http://localhost:3000", + "http://localhost:8080", + "http://127.0.0.1:3000", + "http://127.0.0.1:8080", + "http://localhost:9999", + "http://127.0.0.1:9999", +} + // AllowedOrigins is the set of origins allowed for CORS and WebSocket connections. // Used by both the CORS middleware in server.go and the WebSocket upgrader here. -var AllowedOrigins = map[string]bool{ - "http://localhost:3000": true, - "http://localhost:8080": true, - "http://127.0.0.1:3000": true, - "http://127.0.0.1:8080": true, - "http://localhost:9999": true, - "http://127.0.0.1:9999": true, +// Populated by SetAllowedOrigins; defaults applied if never called. +var AllowedOrigins = buildOriginMap(defaultOrigins) + +// SetAllowedOrigins replaces the allowed origins map. If origins is empty, +// the built-in defaults are used. +func SetAllowedOrigins(origins []string) { + if len(origins) == 0 { + AllowedOrigins = buildOriginMap(defaultOrigins) + return + } + AllowedOrigins = buildOriginMap(origins) +} + +func buildOriginMap(origins []string) map[string]bool { + m := make(map[string]bool, len(origins)) + for _, o := range origins { + m[o] = true + } + return m } // upgrader is the WebSocket upgrader with default settings. diff --git a/internal/api/server.go b/internal/api/server.go index e34cedce..64bb97ce 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -22,7 +22,8 @@ type ServerConfig struct { Host string Port int RequestTimeoutSec int - Token string // bearer token for API auth (empty = no auth) + Token string // bearer token for API auth (empty = no auth) + AllowedOrigins []string // CORS/WebSocket allowed origins (empty = defaults) } // ServerDeps holds dependencies injected into the server. @@ -63,6 +64,7 @@ func NewServer(cfg ServerConfig, deps ServerDeps) *Server { mux: mux, } + routes.SetAllowedOrigins(cfg.AllowedOrigins) s.registerRoutes() addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port) diff --git a/internal/config/config.go b/internal/config/config.go index ec7789dc..8bee9dfd 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -426,10 +426,11 @@ type AgentSDKConfig struct { // APIConfig holds API server settings. 
type APIConfig struct { - Host string `yaml:"host"` - Port int `yaml:"port"` - RequestTimeoutSec int `yaml:"request_timeout_sec"` - Token string `yaml:"token"` // bearer token for API auth (empty = no auth) + Host string `yaml:"host"` + Port int `yaml:"port"` + RequestTimeoutSec int `yaml:"request_timeout_sec"` + Token string `yaml:"token"` // bearer token for API auth (empty = no auth) + AllowedOrigins []string `yaml:"allowed_origins"` // CORS/WebSocket allowed origins (empty = defaults) } // WebConfig holds web UI settings. From 2b2eabe80c1859dba51d1617971255e61402b745 Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 13:00:13 -0400 Subject: [PATCH 06/10] refactor: replace LIKE concept search with FTS5 SearchByConcepts and SearchByConceptsInProject now use the existing memories_fts virtual table with column-scoped queries (concepts:term*) instead of LIKE '%concept%' OR chains. Eliminates full table scans and scales properly beyond 5K memories. Addresses finding #6 from yield audit (issue #355). Co-Authored-By: Claude Opus 4.6 (1M context) --- internal/store/sqlite/sqlite.go | 86 ++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 39 deletions(-) diff --git a/internal/store/sqlite/sqlite.go b/internal/store/sqlite/sqlite.go index ecd20f08..8e368808 100644 --- a/internal/store/sqlite/sqlite.go +++ b/internal/store/sqlite/sqlite.go @@ -1237,38 +1237,26 @@ func (s *SQLiteStore) SearchByEmbedding(ctx context.Context, embedding []float32 return results, nil } -// SearchByConcepts searches for memories by concepts. +// SearchByConcepts searches for memories by concepts using FTS5. func (s *SQLiteStore) SearchByConcepts(ctx context.Context, concepts []string, limit int) ([]store.Memory, error) { if len(concepts) == 0 { return []store.Memory{}, nil } - // Build LIKE conditions for concept matching - query := ` - SELECT ` + memoryColumns + ` - FROM memories - WHERE ` - - args := make([]interface{}, 0) - conditions := make([]string, 0) - - for _, concept := range concepts { - conditions = append(conditions, "concepts LIKE ?") - args = append(args, "%"+concept+"%") - } - - // Join conditions with OR - for i, cond := range conditions { - query += cond - if i < len(conditions)-1 { - query += " OR " - } + ftsQuery := buildConceptFTSQuery(concepts) + if ftsQuery == "" { + return []store.Memory{}, nil } - query += ` ORDER BY salience DESC LIMIT ?` - args = append(args, limit) + query := ` + SELECT ` + memoryColumns + ` + FROM memories m + JOIN memories_fts ON m.rowid = memories_fts.rowid + WHERE memories_fts MATCH ? + ORDER BY m.salience DESC + LIMIT ?` - rows, err := s.db.QueryContext(ctx, query, args...) + rows, err := s.db.QueryContext(ctx, query, ftsQuery, limit) if err != nil { return nil, fmt.Errorf("failed to search by concepts: %w", err) } @@ -1282,28 +1270,24 @@ func (s *SQLiteStore) SearchByConceptsInProject(ctx context.Context, concepts [] return []store.Memory{}, nil } - query := ` - SELECT ` + memoryColumns + ` - FROM memories - WHERE (` - - args := make([]interface{}, 0) - for i, concept := range concepts { - if i > 0 { - query += " OR " - } - query += "concepts LIKE ?" - args = append(args, "%"+concept+"%") + ftsQuery := buildConceptFTSQuery(concepts) + if ftsQuery == "" { + return []store.Memory{}, nil } - query += ")" + args := []interface{}{ftsQuery} + query := ` + SELECT ` + memoryColumns + ` + FROM memories m + JOIN memories_fts ON m.rowid = memories_fts.rowid + WHERE memories_fts MATCH ?` if project != "" { - query += " AND project = ?" 
+ query += " AND m.project = ?" args = append(args, project) } - query += ` ORDER BY salience DESC LIMIT ?` + query += ` ORDER BY m.salience DESC LIMIT ?` args = append(args, limit) rows, err := s.db.QueryContext(ctx, query, args...) @@ -1314,6 +1298,30 @@ func (s *SQLiteStore) SearchByConceptsInProject(ctx context.Context, concepts [] return scanMemoryRows(rows) } +// buildConceptFTSQuery builds an FTS5 MATCH expression scoped to the concepts column. +// Each concept is sanitized and joined with OR. Uses prefix matching for substring-like behavior. +func buildConceptFTSQuery(concepts []string) string { + terms := make([]string, 0, len(concepts)) + for _, c := range concepts { + cleaned := strings.Map(func(r rune) rune { + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') { + return r + } + return -1 + }, c) + cleaned = strings.ToLower(cleaned) + if cleaned == "" || len(cleaned) < 2 { + continue + } + // Scope to the concepts column with prefix matching + terms = append(terms, "concepts:"+cleaned+"*") + } + if len(terms) == 0 { + return "" + } + return strings.Join(terms, " OR ") +} + // Association Operations // CreateAssociation creates a new association between two memories. From e1ff61f036c0c22a40c6d7ffd3ad69e11d5fbbcb Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 13:06:17 -0400 Subject: [PATCH 07/10] refactor: extract shared encoding finalization into persistEncodedMemory Both encodeMemory and finalizeEncodedMemory had ~145 lines of identical finalization logic (dedup check, memory write, resolution, concepts, attributes, associations, event publishing). Extract into persistEncodedMemory with a persistResult return type. Both callers now delegate to it. Addresses finding #8 from yield audit (issue #355). Co-Authored-By: Claude Opus 4.6 (1M context) --- internal/agent/encoding/agent.go | 276 +++++-------------------------- 1 file changed, 40 insertions(+), 236 deletions(-) diff --git a/internal/agent/encoding/agent.go b/internal/agent/encoding/agent.go index a77cf0b1..2890924e 100644 --- a/internal/agent/encoding/agent.go +++ b/internal/agent/encoding/agent.go @@ -709,22 +709,30 @@ func (ea *EncodingAgent) compressRawMemory(ctx context.Context, raw store.RawMem return compression, embeddingText, nil } -// finalizeEncodedMemory handles steps 4-7 of encoding: association creation, store write, etc. -func (ea *EncodingAgent) finalizeEncodedMemory(ctx context.Context, raw store.RawMemory, compression *compressionResponse, embedding []float32) error { +// persistResult describes the outcome of persistEncodedMemory. +type persistResult struct { + deduplicated bool // near-duplicate found and boosted — no new memory created + raceDedup bool // another process encoded this raw concurrently + memoryID string // set only when a new memory was created +} + +// persistEncodedMemory handles the shared finalization path: dedup check, +// memory write, resolution/concept/attribute writes, association creation, +// and event publishing. Both finalizeEncodedMemory and encodeMemory delegate here. 
+func (ea *EncodingAgent) persistEncodedMemory(ctx context.Context, raw store.RawMemory, compression *compressionResponse, embedding []float32) (*persistResult, error) { + // Search for similar memories and check for duplicates var associations []store.Association if len(embedding) > 0 { similar, err := ea.store.SearchByEmbedding(ctx, embedding, ea.config.MaxSimilarSearchResults) if err != nil { ea.log.Warn("failed to search for similar memories", "raw_id", raw.ID, "error", err) } else { - // Check for near-duplicate before creating a new memory dc := ea.buildDedupContext(raw) if dup := findDuplicate(similar, dc); dup != nil { ea.log.Info("dedup: boosting existing memory instead of creating duplicate", "raw_id", raw.ID, "existing_id", dup.Memory.ID, "similarity", dup.Score) - // Boost existing memory's salience (capped at 1.0) newSalience := dup.Memory.Salience + 0.05 if newSalience > 1.0 { newSalience = 1.0 @@ -735,8 +743,7 @@ func (ea *EncodingAgent) finalizeEncodedMemory(ctx context.Context, raw store.Ra if err := ea.store.IncrementAccess(ctx, dup.Memory.ID); err != nil { ea.log.Warn("dedup: failed to increment access", "memory_id", dup.Memory.ID, "error", err) } - // Raw was already claimed — no MarkRawProcessed needed. - return nil + return &persistResult{deduplicated: true}, nil } for _, result := range similar { @@ -780,9 +787,9 @@ func (ea *EncodingAgent) finalizeEncodedMemory(ctx context.Context, raw store.Ra if err := ea.store.WriteMemory(ctx, memory); err != nil { if errors.Is(err, store.ErrDuplicateRawID) { ea.log.Info("dedup: another process already encoded this raw memory", "raw_id", raw.ID) - return nil + return &persistResult{raceDedup: true}, nil } - return fmt.Errorf("failed to write encoded memory: %w", err) + return nil, fmt.Errorf("failed to write encoded memory: %w", err) } ea.log.Debug("memory written to store", "memory_id", memoryID, "raw_id", raw.ID) @@ -889,29 +896,37 @@ func (ea *EncodingAgent) finalizeEncodedMemory(ctx context.Context, raw store.Ra } } - // Raw was already claimed (processed=1) by pollAndProcessRawMemories before - // compression started. No additional MarkRawProcessed needed. - // Publish events if ea.bus != nil { - _ = ea.bus.Publish(ctx, events.MemoryEncoded{ + if err := ea.bus.Publish(ctx, events.MemoryEncoded{ MemoryID: memoryID, RawID: raw.ID, Concepts: memory.Concepts, AssociationsCreated: associationsCreated, Ts: time.Now(), - }) + }); err != nil { + ea.log.Warn("failed to publish MemoryEncoded event", "memory_id", memoryID, "error", err) + } if len(classificationCandidates) > 0 { - _ = ea.bus.Publish(ctx, events.AssociationsPendingClassification{ + if err := ea.bus.Publish(ctx, events.AssociationsPendingClassification{ Candidates: classificationCandidates, Ts: time.Now(), - }) + }); err != nil { + ea.log.Warn("failed to publish classification event", "memory_id", memoryID, "error", err) + } } } ea.log.Info("memory encoding completed", "memory_id", memoryID, "raw_id", raw.ID, "concepts", len(memory.Concepts), "associations_created", associationsCreated) - return nil + return &persistResult{memoryID: memoryID}, nil +} + +// finalizeEncodedMemory handles steps 4-7 of encoding for the batch processing path. +// Delegates to persistEncodedMemory for the actual work. 
+func (ea *EncodingAgent) finalizeEncodedMemory(ctx context.Context, raw store.RawMemory, compression *compressionResponse, embedding []float32) error { + _, err := ea.persistEncodedMemory(ctx, raw, compression, embedding) + return err } // handleEncodingFailure tracks failures and applies backoff when needed. @@ -1035,230 +1050,19 @@ func (ea *EncodingAgent) encodeMemory(ctx context.Context, rawID string) error { ea.log.Debug("embedding generated successfully", "raw_id", raw.ID, "dims", len(embedding)) } - // Step 4: Search for similar memories and check for duplicates - var associations []store.Association - associationsCreated := 0 - if len(embedding) > 0 { - similar, err := ea.store.SearchByEmbedding(ctx, embedding, ea.config.MaxSimilarSearchResults) - if err != nil { - ea.log.Warn("failed to search for similar memories", "raw_id", raw.ID, "error", err) - } else { - ea.log.Debug("similarity search completed", "raw_id", raw.ID, "results", len(similar)) - - // Dedup check: if a near-duplicate already exists, boost it instead of creating a new memory - dc := ea.buildDedupContext(raw) - if dup := findDuplicate(similar, dc); dup != nil { - ea.log.Info("dedup: boosting existing memory instead of creating duplicate", - "raw_id", raw.ID, "existing_id", dup.Memory.ID, "similarity", dup.Score) - newSalience := dup.Memory.Salience + 0.05 - if newSalience > 1.0 { - newSalience = 1.0 - } - if err := ea.store.UpdateSalience(ctx, dup.Memory.ID, newSalience); err != nil { - ea.log.Warn("dedup: failed to boost salience", "memory_id", dup.Memory.ID, "error", err) - } - if err := ea.store.IncrementAccess(ctx, dup.Memory.ID); err != nil { - ea.log.Warn("dedup: failed to increment access", "memory_id", dup.Memory.ID, "error", err) - } - // Raw was already claimed in Step 0 — no MarkRawProcessed needed. - claimed = false // dedup success — don't unclaim - return nil - } - - // Step 5: Create associations for similar memories above threshold - for _, result := range similar { - if result.Score > ea.config.SimilarityThreshold { - // Classify the relationship type - relationType := ea.classifyRelationship(ctx, compression, result.Memory, raw) - - assoc := store.Association{ - SourceID: raw.ID, // Will be replaced with memory ID after storage - TargetID: result.Memory.ID, - Strength: result.Score, - RelationType: relationType, - CreatedAt: time.Now(), - LastActivated: time.Now(), - } - associations = append(associations, assoc) - } - } - } - } - - // Generate memory ID - memoryID := uuid.New().String() - - // Step 6: Write the encoded Memory to the store - memory := store.Memory{ - ID: memoryID, - RawID: raw.ID, - Timestamp: raw.Timestamp, - Type: raw.Type, - Content: compression.Content, - Summary: compression.Summary, - Concepts: compression.Concepts, - Embedding: embedding, - Salience: compression.Salience, - AccessCount: 0, - LastAccessed: time.Time{}, - State: "active", - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - EpisodeID: getEpisodeIDForRaw(ea, ctx, raw), - Source: raw.Source, - Project: raw.Project, - SessionID: raw.SessionID, + // Steps 4-8: Persist the encoded memory (dedup, write, associations, events) + result, err := ea.persistEncodedMemory(ctx, raw, compression, embedding) + if err != nil { + return err } - - if err := ea.store.WriteMemory(ctx, memory); err != nil { - // UNIQUE constraint on raw_id: another process encoded this raw memory - // between our claim and our write. Treat as successful dedup. 
- if errors.Is(err, store.ErrDuplicateRawID) { - ea.log.Info("dedup: another process already encoded this raw memory", "raw_id", raw.ID) - claimed = false // don't unclaim — encoding succeeded elsewhere - return nil - } - return fmt.Errorf("failed to write encoded memory: %w", err) + // Dedup or race dedup — encoding is handled, don't unclaim + if result.deduplicated || result.raceDedup { + claimed = false + return nil } - - // Encoding succeeded — don't unclaim on defer. + // Encoding succeeded — don't unclaim on defer claimed = false - ea.log.Debug("memory written to store", "memory_id", memoryID, "raw_id", raw.ID) - - // Store multi-resolution data - resolution := store.MemoryResolution{ - MemoryID: memoryID, - Gist: compression.Gist, - Narrative: compression.Narrative, - DetailRawIDs: []string{raw.ID}, - CreatedAt: time.Now(), - } - if err := ea.store.WriteMemoryResolution(ctx, resolution); err != nil { - ea.log.Warn("failed to write memory resolution", "error", err) - } - - // Store structured concepts - if compression.StructuredConcepts != nil { - cs := store.ConceptSet{ - MemoryID: memoryID, - Significance: compression.Significance, - CreatedAt: time.Now(), - } - for _, t := range compression.StructuredConcepts.Topics { - cs.Topics = append(cs.Topics, store.Topic{Label: t.Label, Path: t.Path}) - } - for _, e := range compression.StructuredConcepts.Entities { - cs.Entities = append(cs.Entities, store.Entity{Name: e.Name, Type: e.Type, Context: e.Context}) - } - for _, a := range compression.StructuredConcepts.Actions { - cs.Actions = append(cs.Actions, store.Action{Verb: a.Verb, Object: a.Object, Details: a.Details}) - } - for _, c := range compression.StructuredConcepts.Causality { - cs.Causality = append(cs.Causality, store.CausalLink{Relation: c.Relation, Description: c.Description}) - } - if err := ea.store.WriteConceptSet(ctx, cs); err != nil { - ea.log.Warn("failed to write concept set", "error", err) - } - } - - // Store memory attributes - attrs := store.MemoryAttributes{ - MemoryID: memoryID, - Significance: compression.Significance, - EmotionalTone: compression.EmotionalTone, - Outcome: compression.Outcome, - CreatedAt: time.Now(), - } - if err := ea.store.WriteMemoryAttributes(ctx, attrs); err != nil { - ea.log.Warn("failed to write memory attributes", "error", err) - } - - // Now update associations with the actual memory ID and collect candidates for LLM reclassification - var classificationCandidates []events.AssocCandidate - for i := range associations { - associations[i].SourceID = memoryID - if err := ea.store.CreateAssociation(ctx, associations[i]); err != nil { - ea.log.Warn("failed to create association", "source_id", associations[i].SourceID, - "target_id", associations[i].TargetID, "error", err) - } else { - associationsCreated++ - // Collect "similar" (catch-all) associations for potential LLM reclassification - if ea.config.EnableLLMClassification && associations[i].RelationType == "similar" { - targetMem, err := ea.store.GetMemory(ctx, associations[i].TargetID) - if err == nil { - classificationCandidates = append(classificationCandidates, events.AssocCandidate{ - SourceID: memoryID, - TargetID: associations[i].TargetID, - Summary1: compression.Summary, - Summary2: targetMem.Summary, - }) - } - } - } - } - - // Create explicit associations from metadata (set via MCP remember associate_with param). 
- if rawAssoc, ok := raw.Metadata["explicit_associations"]; ok { - if assocList, ok := rawAssoc.([]interface{}); ok { - for _, entry := range assocList { - if m, ok := entry.(map[string]interface{}); ok { - targetID, _ := m["memory_id"].(string) - relation, _ := m["relation"].(string) - if targetID == "" || relation == "" { - continue - } - assoc := store.Association{ - SourceID: memoryID, - TargetID: targetID, - Strength: 0.9, - RelationType: relation, - CreatedAt: time.Now(), - LastActivated: time.Now(), - ActivationCount: 1, - } - if err := ea.store.CreateAssociation(ctx, assoc); err != nil { - ea.log.Warn("failed to create explicit association", - "source_id", memoryID, "target_id", targetID, "error", err) - } else { - associationsCreated++ - } - } - } - } - } - - // Step 7: Raw was already claimed (processed=1) in Step 0. No additional mark needed. - - // Step 8: Publish MemoryEncoded event - encodedEvent := events.MemoryEncoded{ - MemoryID: memoryID, - RawID: raw.ID, - Concepts: memory.Concepts, - AssociationsCreated: associationsCreated, - Ts: time.Now(), - } - - if ea.bus != nil { - if err := ea.bus.Publish(ctx, encodedEvent); err != nil { - ea.log.Warn("failed to publish MemoryEncoded event", "memory_id", memoryID, "error", err) - } - } - - // Publish classification candidates for background LLM reclassification - if ea.bus != nil && len(classificationCandidates) > 0 { - classEvent := events.AssociationsPendingClassification{ - Candidates: classificationCandidates, - Ts: time.Now(), - } - if err := ea.bus.Publish(ctx, classEvent); err != nil { - ea.log.Warn("failed to publish classification event", "memory_id", memoryID, "error", err) - } - } - - ea.log.Info("memory encoding completed", "memory_id", memoryID, "raw_id", raw.ID, - "concepts", len(memory.Concepts), "associations_created", associationsCreated) - return nil } From a59ba04968c65b42b997307f8099972839544c18 Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 13:10:47 -0400 Subject: [PATCH 08/10] refactor: extract hardcoded heuristic filter lists to config Move 110 hardcoded filter entries (ignored paths, lockfiles, app dirs, sensitive files, commands, keywords, etc.) into package-level defaults in heuristic_defaults.go. Add Extra* config fields that extend the defaults without replacing them, merged at construction time. Addresses finding #8 from yield audit (issue #355). 
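Illustrative extension, with example entries (the Extra* values shown are
examples, not defaults):

    hf := perception.NewHeuristicFilter(perception.HeuristicConfig{
        ExtraIgnoredPatterns: []string{".terraform/", "dist/"},
        ExtraLockfileNames:   []string{"uv.lock"},
    }, log)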
Co-Authored-By: Claude Opus 4.6 (1M context) --- cmd/mnemonic/main.go | 29 +++-- internal/agent/perception/heuristic.go | 110 ++++++++---------- .../agent/perception/heuristic_defaults.go | 77 ++++++++++++ internal/config/config.go | 13 +++ 4 files changed, 160 insertions(+), 69 deletions(-) create mode 100644 internal/agent/perception/heuristic_defaults.go diff --git a/cmd/mnemonic/main.go b/cmd/mnemonic/main.go index cc26a4fc..cb4229f5 100644 --- a/cmd/mnemonic/main.go +++ b/cmd/mnemonic/main.go @@ -1518,15 +1518,26 @@ func serveCommand(configPath string) { wrap("perception"), perception.PerceptionConfig{ HeuristicConfig: perception.HeuristicConfig{ - MinContentLength: cfg.Perception.Heuristics.MinContentLength, - MaxContentLength: cfg.Perception.Heuristics.MaxContentLength, - FrequencyThreshold: cfg.Perception.Heuristics.FrequencyThreshold, - FrequencyWindowMin: cfg.Perception.Heuristics.FrequencyWindowMin, - PassScore: float32(cfg.Perception.HeuristicPassScore), - BatchEditWindowSec: cfg.Perception.BatchEditWindowSec, - BatchEditThreshold: cfg.Perception.BatchEditThreshold, - RecallBoostMax: float32(cfg.Perception.RecallBoostMax), - RecallBoostMinutes: cfg.Perception.RecallBoostWindowMin, + MinContentLength: cfg.Perception.Heuristics.MinContentLength, + MaxContentLength: cfg.Perception.Heuristics.MaxContentLength, + FrequencyThreshold: cfg.Perception.Heuristics.FrequencyThreshold, + FrequencyWindowMin: cfg.Perception.Heuristics.FrequencyWindowMin, + PassScore: float32(cfg.Perception.HeuristicPassScore), + BatchEditWindowSec: cfg.Perception.BatchEditWindowSec, + BatchEditThreshold: cfg.Perception.BatchEditThreshold, + RecallBoostMax: float32(cfg.Perception.RecallBoostMax), + RecallBoostMinutes: cfg.Perception.RecallBoostWindowMin, + ExtraIgnoredPatterns: cfg.Perception.Heuristics.ExtraIgnoredPatterns, + ExtraLockfileNames: cfg.Perception.Heuristics.ExtraLockfileNames, + ExtraAppInternalDirs: cfg.Perception.Heuristics.ExtraAppInternalDirs, + ExtraSensitiveNames: cfg.Perception.Heuristics.ExtraSensitiveNames, + ExtraSourceExtensions: cfg.Perception.Heuristics.ExtraSourceExtensions, + ExtraTrivialCommands: cfg.Perception.Heuristics.ExtraTrivialCommands, + ExtraHighSignalCommands: cfg.Perception.Heuristics.ExtraHighSignalCommands, + ExtraCodeIndicators: cfg.Perception.Heuristics.ExtraCodeIndicators, + ExtraHighSignalKeywords: cfg.Perception.Heuristics.ExtraHighSignalKeywords, + ExtraMediumKeywords: cfg.Perception.Heuristics.ExtraMediumKeywords, + ExtraLowKeywords: cfg.Perception.Heuristics.ExtraLowKeywords, Scoring: perception.ScoringConfig{ BaseFilesystem: cfg.Perception.Scoring.BaseFilesystem, BaseTerminal: cfg.Perception.Scoring.BaseTerminal, diff --git a/internal/agent/perception/heuristic.go b/internal/agent/perception/heuristic.go index dc673b12..0b97573f 100644 --- a/internal/agent/perception/heuristic.go +++ b/internal/agent/perception/heuristic.go @@ -37,6 +37,19 @@ type HeuristicConfig struct { RecallBoostMax float32 // max recall salience boost (default: 0.2) RecallBoostMinutes int // minutes recall boost decays over (default: 30) Scoring ScoringConfig // scoring weights + + // Extra* fields extend the compiled-in filter defaults without replacing them. 
+ ExtraIgnoredPatterns []string + ExtraLockfileNames []string + ExtraAppInternalDirs []string + ExtraSensitiveNames []string + ExtraSourceExtensions []string + ExtraTrivialCommands []string + ExtraHighSignalCommands []string + ExtraCodeIndicators []string + ExtraHighSignalKeywords []string + ExtraMediumKeywords []string + ExtraLowKeywords []string } // scoringOrDefault returns the scoring config with defaults for any zero values. @@ -111,6 +124,19 @@ type HeuristicFilter struct { recallMu sync.RWMutex done chan struct{} // signals cleanupLoop to exit + + // Resolved filter lists (compiled defaults + config extras, merged at construction) + ignoredPatterns []string + lockfileNames []string + appInternalDirs []string + sensitiveNames []string + sourceExtensions []string + trivialCommands map[string]bool + highSignalCommands map[string]bool + codeIndicators []string + highKeywords []string + mediumKeywords []string + lowKeywords []string } // recentEdit tracks a file edit for batch detection. @@ -128,6 +154,19 @@ func NewHeuristicFilter(cfg HeuristicConfig, log *slog.Logger) *HeuristicFilter frequency: make(map[string][]frequencyEntry), recalledFiles: make(map[string]time.Time), done: make(chan struct{}), + + // Merge compiled defaults with config extras + ignoredPatterns: append(append([]string{}, defaultIgnoredPatterns...), cfg.ExtraIgnoredPatterns...), + lockfileNames: append(append([]string{}, defaultLockfileNames...), cfg.ExtraLockfileNames...), + appInternalDirs: append(append([]string{}, defaultAppInternalDirs...), cfg.ExtraAppInternalDirs...), + sensitiveNames: append(append([]string{}, defaultSensitiveNames...), cfg.ExtraSensitiveNames...), + sourceExtensions: append(append([]string{}, defaultSourceExtensions...), cfg.ExtraSourceExtensions...), + trivialCommands: mergeToSet(defaultTrivialCommands, cfg.ExtraTrivialCommands), + highSignalCommands: mergeToSet(defaultHighSignalCommands, cfg.ExtraHighSignalCommands), + codeIndicators: append(append([]string{}, defaultCodeIndicators...), cfg.ExtraCodeIndicators...), + highKeywords: append(append([]string{}, defaultHighSignalKeywords...), cfg.ExtraHighSignalKeywords...), + mediumKeywords: append(append([]string{}, defaultMediumSignalKeywords...), cfg.ExtraMediumKeywords...), + lowKeywords: append(append([]string{}, defaultLowSignalKeywords...), cfg.ExtraLowKeywords...), } // Start a cleanup goroutine to periodically remove old entries @@ -339,60 +378,35 @@ func (h *HeuristicFilter) evaluateSource(source, eventType, path, content string // evaluateFilesystem scores filesystem events. 
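// Hard rejects short-circuit with a zero score and no keyword override;
// e.g. a lockfile such as go.sum, or any path under node_modules/, returns
// (0.0, rationale, true) before keyword scoring runs.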
func (h *HeuristicFilter) evaluateFilesystem(path, content string) (float32, string, bool) { // Skip if path contains ignored patterns — hard reject, no keyword override - ignoredPatterns := []string{".git/", "node_modules/", "__pycache__/", ".DS_Store", "~", ".swp", ".tmp", ".xbel", - "venv/", ".venv/", "site-packages/", ".tox/", ".mypy_cache/", ".ruff_cache/", ".pytest_cache/", - ".egg-info/", ".eggs/"} - - // Hard-reject lockfiles, checksums, and release tooling — deterministic files with zero semantic value - lockfileNames := []string{"go.sum", "package-lock.json", "yarn.lock", "Cargo.lock", - "poetry.lock", "pnpm-lock.yaml", "Gemfile.lock", "composer.lock", - ".release-please-manifest.json", "CHANGELOG.md"} baseName := path if idx := strings.LastIndex(baseName, "/"); idx >= 0 { baseName = baseName[idx+1:] } - for _, lf := range lockfileNames { + for _, lf := range h.lockfileNames { if baseName == lf { return 0.0, fmt.Sprintf("filesystem: lockfile '%s'", lf), true } } - for _, pattern := range ignoredPatterns { + for _, pattern := range h.ignoredPatterns { if strings.Contains(path, pattern) { return 0.0, fmt.Sprintf("filesystem: ignored path pattern '%s'", pattern), true } } // Suppress application-internal state directories — hard reject - appInternalDirs := []string{ - "/google-chrome/", "/chromium/", "/BraveSoftware/", - "/LM Studio/", "/lm-studio/", - "/Trash/", "/.local/share/Trash/", - "/leveldb/", "/IndexedDB/", "/Local Storage/", "/Session Storage/", - "/Cache/", "/GPUCache/", "/ShaderCache/", "/Code Cache/", - "/dconf/", "/gconf/", - "/pulse/", "/pipewire/", "/wireplumber/", - "/gvfs-metadata/", "/tracker3/", - "session_migration-", - "/.copilot/", "/.github-copilot/", - "/snap/", "/.snap/", - "/.config/gtk-", "/.config/dbus-", - "/.mnemonic/", "/.claude/", - } lowerPathCheck := strings.ToLower(path) - for _, dir := range appInternalDirs { + for _, dir := range h.appInternalDirs { if strings.Contains(lowerPathCheck, strings.ToLower(dir)) { return 0.0, fmt.Sprintf("filesystem: application-internal path '%s'", dir), true } } // Hard-reject sensitive files (defense-in-depth — watcher should block these first) - sensitiveNames := []string{".env", "id_rsa", "id_ed25519", "id_ecdsa", ".pem", ".key", - "credentials", "secret", ".keychain", ".keystore", ".netrc", ".htpasswd"} baseName = strings.ToLower(path) if idx := strings.LastIndex(baseName, "/"); idx >= 0 { baseName = baseName[idx+1:] } - for _, s := range sensitiveNames { + for _, s := range h.sensitiveNames { if strings.Contains(baseName, s) { return 0.0, fmt.Sprintf("filesystem: sensitive file '%s'", s), true } @@ -414,8 +428,7 @@ func (h *HeuristicFilter) evaluateFilesystem(path, content string) (float32, str } // Score boost for source code - sourceExtensions := []string{".go", ".py", ".js", ".ts", ".java", ".rs", ".cpp", ".c", ".h"} - for _, ext := range sourceExtensions { + for _, ext := range h.sourceExtensions { if strings.HasSuffix(lowerPath, ext) { score += h.scoring.BoostSourceCode rationale += "; source code" @@ -439,23 +452,12 @@ func (h *HeuristicFilter) evaluateTerminal(content string) (float32, string, boo cmd := strings.ToLower(command[0]) // Skip trivial commands (only if they are just the command itself) — hard reject - trivialCommands := map[string]bool{ - "cd": true, "ls": true, "pwd": true, "clear": true, - "exit": true, "history": true, "which": true, "whoami": true, - "echo": true, - } - - if trivialCommands[cmd] && len(command) == 1 { + if h.trivialCommands[cmd] && len(command) == 1 { return 0.0, 
fmt.Sprintf("terminal: trivial command '%s'", cmd), true } // Score boost for high-signal commands - highSignalCommands := map[string]bool{ - "git": true, "make": true, "go": true, "npm": true, "docker": true, - "kubectl": true, "ssh": true, "curl": true, "python": true, "node": true, - } - - for signalCmd := range highSignalCommands { + for signalCmd := range h.highSignalCommands { if strings.HasPrefix(cmd, signalCmd) { score += h.scoring.BoostCommand rationale += fmt.Sprintf("; high-signal command '%s'", cmd) @@ -480,9 +482,8 @@ func (h *HeuristicFilter) evaluateClipboard(content string) (float32, string, bo } // Score boost for code snippets - codeIndicators := []string{"{", "}", "function", "def", "class", "import", "package"} foundCodeIndicators := 0 - for _, indicator := range codeIndicators { + for _, indicator := range h.codeIndicators { if strings.Contains(content, indicator) { foundCodeIndicators++ } @@ -503,11 +504,7 @@ func (h *HeuristicFilter) scoreKeywords(content string) (float32, int) { matchCount := 0 // High signal keywords - highSignalKeywords := []string{ - "error", "bug", "fix", "todo", "hack", - "important", "decision", "deadline", "meeting", - } - for _, keyword := range highSignalKeywords { + for _, keyword := range h.highKeywords { if strings.Contains(contentLower, keyword) { score += h.scoring.KeywordHigh matchCount++ @@ -515,11 +512,7 @@ func (h *HeuristicFilter) scoreKeywords(content string) (float32, int) { } // Medium signal keywords - mediumSignalKeywords := []string{ - "config", "deploy", "release", "review", - "merge", "refactor", "test", "fail", - } - for _, keyword := range mediumSignalKeywords { + for _, keyword := range h.mediumKeywords { if strings.Contains(contentLower, keyword) { score += h.scoring.KeywordMedium matchCount++ @@ -527,10 +520,7 @@ func (h *HeuristicFilter) scoreKeywords(content string) (float32, int) { } // Low signal keywords - lowSignalKeywords := []string{ - "update", "change", "add", "remove", "create", "install", - } - for _, keyword := range lowSignalKeywords { + for _, keyword := range h.lowKeywords { if strings.Contains(contentLower, keyword) { score += h.scoring.KeywordLow matchCount++ diff --git a/internal/agent/perception/heuristic_defaults.go b/internal/agent/perception/heuristic_defaults.go new file mode 100644 index 00000000..52619b5d --- /dev/null +++ b/internal/agent/perception/heuristic_defaults.go @@ -0,0 +1,77 @@ +package perception + +// Default filter lists compiled into the binary. Users can extend these +// via the Extra* config fields in HeuristicsConfig without replacing them. 
+ +var defaultIgnoredPatterns = []string{ + ".git/", "node_modules/", "__pycache__/", ".DS_Store", "~", ".swp", ".tmp", ".xbel", + "venv/", ".venv/", "site-packages/", ".tox/", ".mypy_cache/", ".ruff_cache/", ".pytest_cache/", + ".egg-info/", ".eggs/", +} + +var defaultLockfileNames = []string{ + "go.sum", "package-lock.json", "yarn.lock", "Cargo.lock", + "poetry.lock", "pnpm-lock.yaml", "Gemfile.lock", "composer.lock", + ".release-please-manifest.json", "CHANGELOG.md", +} + +var defaultAppInternalDirs = []string{ + "/google-chrome/", "/chromium/", "/BraveSoftware/", + "/LM Studio/", "/lm-studio/", + "/Trash/", "/.local/share/Trash/", + "/leveldb/", "/IndexedDB/", "/Local Storage/", "/Session Storage/", + "/Cache/", "/GPUCache/", "/ShaderCache/", "/Code Cache/", + "/dconf/", "/gconf/", + "/pulse/", "/pipewire/", "/wireplumber/", + "/gvfs-metadata/", "/tracker3/", + "session_migration-", + "/.copilot/", "/.github-copilot/", + "/snap/", "/.snap/", + "/.config/gtk-", "/.config/dbus-", + "/.mnemonic/", "/.claude/", +} + +var defaultSensitiveNames = []string{ + ".env", "id_rsa", "id_ed25519", "id_ecdsa", ".pem", ".key", + "credentials", "secret", ".keychain", ".keystore", ".netrc", ".htpasswd", +} + +var defaultSourceExtensions = []string{ + ".go", ".py", ".js", ".ts", ".java", ".rs", ".cpp", ".c", ".h", +} + +var defaultTrivialCommands = []string{ + "cd", "ls", "pwd", "clear", "exit", "history", "which", "whoami", "echo", +} + +var defaultHighSignalCommands = []string{ + "git", "make", "go", "npm", "docker", "kubectl", "ssh", "curl", "python", "node", +} + +var defaultCodeIndicators = []string{ + "{", "}", "function", "def", "class", "import", "package", +} + +var defaultHighSignalKeywords = []string{ + "error", "bug", "fix", "todo", "hack", "important", "decision", "deadline", "meeting", +} + +var defaultMediumSignalKeywords = []string{ + "config", "deploy", "release", "review", "merge", "refactor", "test", "fail", +} + +var defaultLowSignalKeywords = []string{ + "update", "change", "add", "remove", "create", "install", +} + +// mergeToSet builds a bool map from defaults and extras. +func mergeToSet(defaults, extras []string) map[string]bool { + m := make(map[string]bool, len(defaults)+len(extras)) + for _, s := range defaults { + m[s] = true + } + for _, s := range extras { + m[s] = true + } + return m +} diff --git a/internal/config/config.go b/internal/config/config.go index 8bee9dfd..baf48e91 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -164,6 +164,19 @@ type HeuristicsConfig struct { MaxContentLength int `yaml:"max_content_length"` FrequencyThreshold int `yaml:"frequency_threshold"` FrequencyWindowMin int `yaml:"frequency_window_min"` + + // Extra* fields extend the compiled-in defaults without replacing them. + ExtraIgnoredPatterns []string `yaml:"extra_ignored_patterns"` + ExtraLockfileNames []string `yaml:"extra_lockfile_names"` + ExtraAppInternalDirs []string `yaml:"extra_app_internal_dirs"` + ExtraSensitiveNames []string `yaml:"extra_sensitive_names"` + ExtraSourceExtensions []string `yaml:"extra_source_extensions"` + ExtraTrivialCommands []string `yaml:"extra_trivial_commands"` + ExtraHighSignalCommands []string `yaml:"extra_high_signal_commands"` + ExtraCodeIndicators []string `yaml:"extra_code_indicators"` + ExtraHighSignalKeywords []string `yaml:"extra_high_signal_keywords"` + ExtraMediumKeywords []string `yaml:"extra_medium_keywords"` + ExtraLowKeywords []string `yaml:"extra_low_keywords"` } // EncodingConfig holds encoding settings. 
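
Note on the merge semantics above: the merge in NewHeuristicFilter is purely additive. Compiled defaults always apply, config extras are appended after them (slice lists) or unioned in (command sets). Below is a minimal sketch of that contract written as a package-internal test; the extra values "ll" and "uv.lock" are hypothetical user entries, not part of the shipped defaults, and the sketch assumes no construction-time filtering beyond what this patch shows.

package perception

import (
	"log/slog"
	"testing"
)

// TestExtrasExtendDefaults verifies that Extra* config fields extend the
// compiled-in filter lists rather than replacing them.
func TestExtrasExtendDefaults(t *testing.T) {
	// The filter starts a background cleanup goroutine that is not stopped
	// here; that is acceptable for a short-lived test sketch.
	f := NewHeuristicFilter(HeuristicConfig{
		ExtraTrivialCommands: []string{"ll"},      // hypothetical extra
		ExtraLockfileNames:   []string{"uv.lock"}, // hypothetical extra
	}, slog.Default())

	// Compiled defaults survive the merge.
	if !f.trivialCommands["cd"] {
		t.Error("default trivial command 'cd' missing after merge")
	}
	// Extras are unioned into the same set as the defaults.
	if !f.trivialCommands["ll"] {
		t.Error("extra trivial command 'll' not merged")
	}
	// Slice extras are appended after the compiled defaults.
	if f.lockfileNames[len(f.lockfileNames)-1] != "uv.lock" {
		t.Error("extra lockfile name not appended after defaults")
	}
}

In YAML these knobs surface as the extra_* keys added to HeuristicsConfig in this patch (extra_trivial_commands, extra_lockfile_names, and so on), so users can tune the filters without recompiling.
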
From 9bcefb16d128e29393ff8a24c69243be5de519c1 Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 13:12:59 -0400 Subject: [PATCH 09/10] refactor: decompose store.Store into 16 composable sub-interfaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Define focused sub-interfaces (RawMemoryStore, MemoryStore, SearchStore, AssociationStore, ConceptStore, EpisodeStore, PatternStore, AbstractionStore, MetacognitionStore, FeedbackStore, ConsolidationStore, SessionStore, ExclusionStore, UsageStore, ForumStore, AnalyticsStore) and redefine Store as their composition. Zero behavior change — the method set is identical, SQLiteStore and MockStore still satisfy the interface. Consumers can be gradually migrated to accept sub-interfaces in follow-up work. Addresses finding #9 from yield audit (issue #355). Co-Authored-By: Claude Opus 4.6 (1M context) --- internal/store/store.go | 208 +++++++++++++++++++++------------------- 1 file changed, 110 insertions(+), 98 deletions(-) diff --git a/internal/store/store.go b/internal/store/store.go index 6713160b..4f4701e8 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -403,25 +403,26 @@ type ForumThread struct { LastReply time.Time `json:"last_reply"` } -// Store is the abstraction for persistent memory. -type Store interface { - // --- Raw memory operations --- +// RawMemoryStore handles raw (unencoded) memory persistence. +type RawMemoryStore interface { WriteRaw(ctx context.Context, raw RawMemory) error RawMemoryExistsByHash(ctx context.Context, contentHash string) (bool, error) GetRaw(ctx context.Context, id string) (RawMemory, error) ListRawUnprocessed(ctx context.Context, limit int) ([]RawMemory, error) ListRawMemoriesAfter(ctx context.Context, after time.Time, limit int) ([]RawMemory, error) MarkRawProcessed(ctx context.Context, id string) error - // ClaimRawForEncoding atomically marks a raw memory as processed only if it - // hasn't been claimed yet (processed=0). Returns ErrAlreadyClaimed if another - // process already claimed it. This prevents duplicate encoding across multiple - // mnemonic processes sharing the same database. ClaimRawForEncoding(ctx context.Context, id string) error - // UnclaimRawMemory resets a raw memory to unprocessed (processed=0) so it - // can be retried after a failed encoding attempt. UnclaimRawMemory(ctx context.Context, id string) error + RawMemoryExistsByPath(ctx context.Context, source string, project string, filePath string) (bool, error) + BatchWriteRaw(ctx context.Context, raws []RawMemory) error + ListAllRawMemories(ctx context.Context) ([]RawMemory, error) + CountRawUnprocessedByPathPatterns(ctx context.Context, patterns []string) (int, error) + BulkMarkRawProcessedByPathPatterns(ctx context.Context, patterns []string) (int, error) + ArchiveMemoriesByRawPathPatterns(ctx context.Context, patterns []string) (int, error) +} - // --- Encoded memory operations --- +// MemoryStore handles encoded memory CRUD operations. 
+type MemoryStore interface { WriteMemory(ctx context.Context, mem Memory) error GetMemory(ctx context.Context, id string) (Memory, error) GetMemoryByRawID(ctx context.Context, rawID string) (Memory, error) @@ -432,17 +433,33 @@ type Store interface { IncrementAccess(ctx context.Context, id string) error ListMemories(ctx context.Context, state string, limit, offset int) ([]Memory, error) CountMemories(ctx context.Context) (int, error) - - // --- Memory amendment --- AmendMemory(ctx context.Context, id string, newContent string, newSummary string, newConcepts []string, newEmbedding []float32) error + BatchUpdateSalience(ctx context.Context, updates map[string]float32) error + BatchMergeMemories(ctx context.Context, sourceIDs []string, gist Memory) error + DeleteOldArchived(ctx context.Context, olderThan time.Time) (int, error) + GetDeadMemories(ctx context.Context, cutoffDate time.Time) ([]Memory, error) + WriteMemoryResolution(ctx context.Context, res MemoryResolution) error + GetMemoryResolution(ctx context.Context, memoryID string) (MemoryResolution, error) + WriteMemoryAttributes(ctx context.Context, attrs MemoryAttributes) error + GetMemoryAttributes(ctx context.Context, memoryID string) (MemoryAttributes, error) +} - // --- Search operations --- +// SearchStore handles memory search and retrieval. +type SearchStore interface { SearchByFullText(ctx context.Context, query string, limit int) ([]Memory, error) SearchByEmbedding(ctx context.Context, embedding []float32, limit int) ([]RetrievalResult, error) SearchByConcepts(ctx context.Context, concepts []string, limit int) ([]Memory, error) SearchByConceptsInProject(ctx context.Context, concepts []string, project string, limit int) ([]Memory, error) + SearchByProject(ctx context.Context, project string, query string, limit int) ([]Memory, error) + SearchByEntity(ctx context.Context, name string, entityType string, limit int) ([]Memory, error) + ListMemoriesByTimeRange(ctx context.Context, from, to time.Time, limit int) ([]Memory, error) + ListMemoriesBySession(ctx context.Context, sessionID string) ([]Memory, error) + GetProjectSummary(ctx context.Context, project string) (map[string]interface{}, error) + ListProjects(ctx context.Context) ([]string, error) +} - // --- Association graph operations --- +// AssociationStore handles the memory association graph. +type AssociationStore interface { CreateAssociation(ctx context.Context, assoc Association) error GetAssociations(ctx context.Context, memoryID string) ([]Association, error) UpdateAssociationStrength(ctx context.Context, sourceID, targetID string, strength float32) error @@ -450,77 +467,28 @@ type Store interface { ActivateAssociation(ctx context.Context, sourceID, targetID string) error PruneWeakAssociations(ctx context.Context, strengthThreshold float32) (int, error) PruneOrphanedAssociations(ctx context.Context) (int, error) - - // --- Deduplication --- - RawMemoryExistsByPath(ctx context.Context, source string, project string, filePath string) (bool, error) - - // --- Cleanup operations --- - // CountRawUnprocessedByPathPatterns counts unprocessed raw memories - // whose metadata path matches any of the given substring patterns. - CountRawUnprocessedByPathPatterns(ctx context.Context, patterns []string) (int, error) - // BulkMarkRawProcessedByPathPatterns marks unprocessed raw memories as processed - // where the metadata path matches any of the given substring patterns. 
- BulkMarkRawProcessedByPathPatterns(ctx context.Context, patterns []string) (int, error) - // ArchiveMemoriesByRawPathPatterns archives encoded memories whose raw_id - // references a raw memory with a path matching any of the given patterns. - ArchiveMemoriesByRawPathPatterns(ctx context.Context, patterns []string) (int, error) - - // --- Batch operations (for consolidation) --- - BatchWriteRaw(ctx context.Context, raws []RawMemory) error - BatchUpdateSalience(ctx context.Context, updates map[string]float32) error - BatchMergeMemories(ctx context.Context, sourceIDs []string, gist Memory) error - DeleteOldArchived(ctx context.Context, olderThan time.Time) (int, error) - - // --- Consolidation tracking --- - WriteConsolidation(ctx context.Context, record ConsolidationRecord) error - GetLastConsolidation(ctx context.Context) (ConsolidationRecord, error) - - // --- Export/Backup operations --- - ListAllAssociations(ctx context.Context) ([]Association, error) - ListAllRawMemories(ctx context.Context) ([]RawMemory, error) - - // --- Scoped association queries --- GetAssociationsForMemoryIDs(ctx context.Context, memoryIDs []string) ([]Association, error) + ListAllAssociations(ctx context.Context) ([]Association, error) +} - // --- Metacognition operations --- - WriteMetaObservation(ctx context.Context, obs MetaObservation) error - ListMetaObservations(ctx context.Context, observationType string, limit int) ([]MetaObservation, error) - DeleteOldMetaObservations(ctx context.Context, olderThan time.Time) (int, error) - GetDeadMemories(ctx context.Context, cutoffDate time.Time) ([]Memory, error) - GetSourceDistribution(ctx context.Context) (map[string]int, error) - - // --- Retrieval feedback operations --- - WriteRetrievalFeedback(ctx context.Context, fb RetrievalFeedback) error - GetRetrievalFeedback(ctx context.Context, queryID string) (RetrievalFeedback, error) - ListRecentRetrievalFeedback(ctx context.Context, since time.Time, limit int) ([]RetrievalFeedback, error) - PruneOldFeedback(ctx context.Context, olderThan time.Duration) (int, error) - // GetMemoryFeedbackScores computes a normalized feedback score for each memory ID - // based on retrieval_feedback records. "helpful" = +1, "irrelevant" = -1, "partial" = 0. - // Returns sum/count per memory, so scores range from -1.0 to +1.0. - GetMemoryFeedbackScores(ctx context.Context, memoryIDs []string) (map[string]float32, error) +// ConceptStore handles structured concept persistence. +type ConceptStore interface { + WriteConceptSet(ctx context.Context, cs ConceptSet) error + GetConceptSet(ctx context.Context, memoryID string) (ConceptSet, error) +} - // --- Episode operations --- +// EpisodeStore handles episode lifecycle. 
+type EpisodeStore interface { CreateEpisode(ctx context.Context, ep Episode) error GetEpisode(ctx context.Context, id string) (Episode, error) UpdateEpisode(ctx context.Context, ep Episode) error ListEpisodes(ctx context.Context, state string, limit, offset int) ([]Episode, error) GetOpenEpisode(ctx context.Context) (Episode, error) CloseEpisode(ctx context.Context, id string) error +} - // --- Multi-resolution operations --- - WriteMemoryResolution(ctx context.Context, res MemoryResolution) error - GetMemoryResolution(ctx context.Context, memoryID string) (MemoryResolution, error) - - // --- Structured concept operations --- - WriteConceptSet(ctx context.Context, cs ConceptSet) error - GetConceptSet(ctx context.Context, memoryID string) (ConceptSet, error) - SearchByEntity(ctx context.Context, name string, entityType string, limit int) ([]Memory, error) - - // --- Memory attribute operations --- - WriteMemoryAttributes(ctx context.Context, attrs MemoryAttributes) error - GetMemoryAttributes(ctx context.Context, memoryID string) (MemoryAttributes, error) - - // --- Pattern operations --- +// PatternStore handles recurring pattern persistence. +type PatternStore interface { WritePattern(ctx context.Context, p Pattern) error GetPattern(ctx context.Context, id string) (Pattern, error) UpdatePattern(ctx context.Context, p Pattern) error @@ -529,8 +497,10 @@ type Store interface { SearchPatternsByEmbeddingInProject(ctx context.Context, embedding []float32, project string, limit int) ([]Pattern, error) ArchivePattern(ctx context.Context, id string) error ArchiveAllPatterns(ctx context.Context) (int, error) +} - // --- Abstraction operations --- +// AbstractionStore handles abstraction persistence (principles, axioms). +type AbstractionStore interface { WriteAbstraction(ctx context.Context, a Abstraction) error GetAbstraction(ctx context.Context, id string) (Abstraction, error) UpdateAbstraction(ctx context.Context, a Abstraction) error @@ -539,48 +509,62 @@ type Store interface { SearchAbstractionsByEmbedding(ctx context.Context, embedding []float32, limit int) ([]Abstraction, error) ArchiveAbstraction(ctx context.Context, id string) error ArchiveAllAbstractions(ctx context.Context) (int, error) +} - // --- Scoped queries --- - SearchByProject(ctx context.Context, project string, query string, limit int) ([]Memory, error) - ListMemoriesByTimeRange(ctx context.Context, from, to time.Time, limit int) ([]Memory, error) - ListMemoriesBySession(ctx context.Context, sessionID string) ([]Memory, error) - GetProjectSummary(ctx context.Context, project string) (map[string]interface{}, error) - ListProjects(ctx context.Context) ([]string, error) +// MetacognitionStore handles self-reflection and observation data. +type MetacognitionStore interface { + WriteMetaObservation(ctx context.Context, obs MetaObservation) error + ListMetaObservations(ctx context.Context, observationType string, limit int) ([]MetaObservation, error) + DeleteOldMetaObservations(ctx context.Context, olderThan time.Time) (int, error) + GetSourceDistribution(ctx context.Context) (map[string]int, error) +} - // --- Runtime exclusions --- - AddRuntimeExclusion(ctx context.Context, pattern string) error - RemoveRuntimeExclusion(ctx context.Context, pattern string) error - ListRuntimeExclusions(ctx context.Context) ([]string, error) +// FeedbackStore handles retrieval feedback tracking. 
+type FeedbackStore interface { + WriteRetrievalFeedback(ctx context.Context, fb RetrievalFeedback) error + GetRetrievalFeedback(ctx context.Context, queryID string) (RetrievalFeedback, error) + ListRecentRetrievalFeedback(ctx context.Context, since time.Time, limit int) ([]RetrievalFeedback, error) + PruneOldFeedback(ctx context.Context, olderThan time.Duration) (int, error) + GetMemoryFeedbackScores(ctx context.Context, memoryIDs []string) (map[string]float32, error) +} + +// ConsolidationStore handles consolidation cycle tracking. +type ConsolidationStore interface { + WriteConsolidation(ctx context.Context, record ConsolidationRecord) error + GetLastConsolidation(ctx context.Context) (ConsolidationRecord, error) +} - // --- Session queries --- +// SessionStore handles session queries. +type SessionStore interface { ListSessions(ctx context.Context, since time.Time, limit int) ([]SessionSummary, error) GetSessionMemories(ctx context.Context, sessionID string, limit int) ([]Memory, error) +} - // --- Housekeeping --- - GetStatistics(ctx context.Context) (StoreStatistics, error) +// ExclusionStore handles runtime watcher exclusions. +type ExclusionStore interface { + AddRuntimeExclusion(ctx context.Context, pattern string) error + RemoveRuntimeExclusion(ctx context.Context, pattern string) error + ListRuntimeExclusions(ctx context.Context) ([]string, error) +} - // --- LLM usage tracking --- +// UsageStore handles LLM and MCP tool usage tracking. +type UsageStore interface { RecordLLMUsage(ctx context.Context, record llm.LLMUsageRecord) error GetLLMUsageSummary(ctx context.Context, since time.Time) (LLMUsageSummary, error) GetLLMUsageLog(ctx context.Context, since time.Time, limit int) ([]llm.LLMUsageRecord, error) GetLLMUsageChart(ctx context.Context, since time.Time, bucketSecs int) ([]LLMChartBucket, error) - - // --- MCP tool usage tracking --- RecordToolUsage(ctx context.Context, record ToolUsageRecord) error GetToolUsageSummary(ctx context.Context, since time.Time) (ToolUsageSummary, error) GetToolUsageLog(ctx context.Context, since time.Time, limit int) ([]ToolUsageRecord, error) GetToolUsageChart(ctx context.Context, since time.Time, bucketSecs int) ([]ToolChartBucket, error) +} - // --- Research analytics --- - GetAnalytics(ctx context.Context) (AnalyticsData, error) - - // --- Forum category operations --- +// ForumStore handles forum posts and threads. +type ForumStore interface { WriteForumCategory(ctx context.Context, cat ForumCategory) error GetForumCategory(ctx context.Context, id string) (ForumCategory, error) ListForumCategories(ctx context.Context) ([]ForumCategory, error) ListForumCategorySummaries(ctx context.Context) ([]ForumCategorySummary, error) - - // --- Forum operations --- WriteForumPost(ctx context.Context, post ForumPost) error GetForumPost(ctx context.Context, id string) (ForumPost, error) ListForumThreads(ctx context.Context, limit, offset int) ([]ForumThread, error) @@ -589,6 +573,34 @@ type Store interface { UpdateForumPostState(ctx context.Context, id string, state string) error CountForumPosts(ctx context.Context) (int, error) GetDailyDigestThread(ctx context.Context, categoryID string, date time.Time) (ForumPost, error) +} + +// AnalyticsStore handles research analytics and housekeeping. +type AnalyticsStore interface { + GetStatistics(ctx context.Context) (StoreStatistics, error) + GetAnalytics(ctx context.Context) (AnalyticsData, error) +} + +// Store is the full abstraction for persistent memory. 
+// It composes all sub-interfaces — consumers that need only a subset +// should accept the relevant sub-interface instead. +type Store interface { + RawMemoryStore + MemoryStore + SearchStore + AssociationStore + ConceptStore + EpisodeStore + PatternStore + AbstractionStore + MetacognitionStore + FeedbackStore + ConsolidationStore + SessionStore + ExclusionStore + UsageStore + ForumStore + AnalyticsStore // --- Lifecycle --- Close() error From 8771e7b5a2aa53b889f947615abf80431ffdc91f Mon Sep 17 00:00:00 2001 From: Caleb Gross Date: Sun, 29 Mar 2026 13:28:18 -0400 Subject: [PATCH 10/10] refactor: split main.go monolith into 16 focused files Break cmd/mnemonic/main.go (3407 lines) into logical files: - main.go (241 lines): router, constants, printUsage - serve.go (730): daemon serve command - dedup_cli.go (368): dedup and reset-patterns commands - cycle.go (263): meta-cycle, dream-cycle, mcp, autopilot - status.go (251): status command and helpers - runtime.go (233): config builders, LLM provider factory - daemon.go (207): start/stop/restart commands - memory_cli.go (182): remember, recall, consolidate - purge.go (175): purge and cleanup commands - diagnose.go (174): diagnose command - export.go (143): export, import, backup - watch.go (134): watch command - web.go (124): agent web server - update.go (90): check-update, update, generate-token - insights.go (89): insights command - restore.go (73): restore command - install.go (60): install/uninstall commands - util.go (9): truncate helper All files remain in package main. No function signatures changed. Addresses finding #10 from yield audit (issue #355). Co-Authored-By: Claude Opus 4.6 (1M context) --- cmd/mnemonic/cycle.go | 263 +++ cmd/mnemonic/daemon.go | 207 +++ cmd/mnemonic/dedup_cli.go | 368 ++++ cmd/mnemonic/diagnose.go | 174 ++ cmd/mnemonic/export.go | 143 ++ cmd/mnemonic/insights.go | 89 + cmd/mnemonic/install.go | 60 + cmd/mnemonic/main.go | 3280 +----------------------------------- cmd/mnemonic/memory_cli.go | 182 ++ cmd/mnemonic/purge.go | 175 ++ cmd/mnemonic/restore.go | 73 + cmd/mnemonic/runtime.go | 233 +++ cmd/mnemonic/serve.go | 730 ++++++++ cmd/mnemonic/status.go | 251 +++ cmd/mnemonic/update.go | 90 + cmd/mnemonic/util.go | 9 + cmd/mnemonic/watch.go | 134 ++ cmd/mnemonic/web.go | 124 ++ 18 files changed, 3362 insertions(+), 3223 deletions(-) create mode 100644 cmd/mnemonic/cycle.go create mode 100644 cmd/mnemonic/daemon.go create mode 100644 cmd/mnemonic/dedup_cli.go create mode 100644 cmd/mnemonic/diagnose.go create mode 100644 cmd/mnemonic/export.go create mode 100644 cmd/mnemonic/insights.go create mode 100644 cmd/mnemonic/install.go create mode 100644 cmd/mnemonic/memory_cli.go create mode 100644 cmd/mnemonic/purge.go create mode 100644 cmd/mnemonic/restore.go create mode 100644 cmd/mnemonic/runtime.go create mode 100644 cmd/mnemonic/serve.go create mode 100644 cmd/mnemonic/status.go create mode 100644 cmd/mnemonic/update.go create mode 100644 cmd/mnemonic/util.go create mode 100644 cmd/mnemonic/watch.go create mode 100644 cmd/mnemonic/web.go diff --git a/cmd/mnemonic/cycle.go b/cmd/mnemonic/cycle.go new file mode 100644 index 00000000..8ba9a9ca --- /dev/null +++ b/cmd/mnemonic/cycle.go @@ -0,0 +1,263 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/signal" + "path/filepath" + "strings" + "time" + + "github.com/appsprout-dev/mnemonic/internal/agent/dreaming" + "github.com/appsprout-dev/mnemonic/internal/agent/encoding" + "github.com/appsprout-dev/mnemonic/internal/agent/metacognition" 
+ "github.com/appsprout-dev/mnemonic/internal/agent/orchestrator" + "github.com/appsprout-dev/mnemonic/internal/agent/retrieval" + "github.com/appsprout-dev/mnemonic/internal/config" + "github.com/appsprout-dev/mnemonic/internal/events" + "github.com/appsprout-dev/mnemonic/internal/mcp" +) + +// metaCycleCommand runs a single metacognition cycle and displays results. +func metaCycleCommand(configPath string) { + cfg, db, llmProvider, log := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + bus := events.NewInMemoryBus(100) + defer func() { _ = bus.Close() }() + + agent := metacognition.NewMetacognitionAgent(db, llmProvider, metacognition.MetacognitionConfig{ + Interval: 24 * time.Hour, // doesn't matter for RunOnce + ReflectionLookback: cfg.Metacognition.ReflectionLookback, + DeadMemoryWindow: cfg.Metacognition.DeadMemoryWindow, + }, log) + + fmt.Println("Running metacognition cycle...") + + report, err := agent.RunOnce(ctx) + if err != nil { + fmt.Fprintf(os.Stderr, "Metacognition cycle failed: %v\n", err) + os.Exit(1) + } + + fmt.Printf("%sMetacognition complete%s (%dms):\n", colorGreen, colorReset, report.Duration.Milliseconds()) + + if len(report.Observations) == 0 { + fmt.Println(" No issues found — memory health looks good.") + return + } + + fmt.Printf(" %d observation(s):\n\n", len(report.Observations)) + for _, obs := range report.Observations { + severityColor := colorGray + switch obs.Severity { + case "warning": + severityColor = colorYellow + case "critical": + severityColor = colorRed + case "info": + severityColor = colorCyan + } + + typeLabel := strings.ReplaceAll(obs.ObservationType, "_", " ") + typeLabel = strings.ToUpper(typeLabel[:1]) + typeLabel[1:] + + fmt.Printf(" %s[%s]%s %s\n", severityColor, strings.ToUpper(obs.Severity), colorReset, typeLabel) + for key, val := range obs.Details { + keyLabel := strings.ReplaceAll(key, "_", " ") + fmt.Printf(" %s: %v\n", keyLabel, val) + } + fmt.Println() + } +} + +// dreamCycleCommand runs a single dream cycle and displays results. +func dreamCycleCommand(configPath string) { + cfg, db, llmProvider, log := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + bus := events.NewInMemoryBus(100) + defer func() { _ = bus.Close() }() + + agent := dreaming.NewDreamingAgent(db, llmProvider, dreaming.DreamingConfig{ + Interval: 3 * time.Hour, // doesn't matter for RunOnce + BatchSize: cfg.Dreaming.BatchSize, + SalienceThreshold: cfg.Dreaming.SalienceThreshold, + AssociationBoostFactor: cfg.Dreaming.AssociationBoostFactor, + NoisePruneThreshold: cfg.Dreaming.NoisePruneThreshold, + DeadMemoryWindow: cfg.Dreaming.DeadMemoryWindow, + InsightsBudget: cfg.Dreaming.InsightsBudget, + DefaultConfidence: cfg.Dreaming.DefaultConfidence, + }, log) + + fmt.Println("Running dream cycle (memory replay)...") + + report, err := agent.RunOnce(ctx) + if err != nil { + fmt.Fprintf(os.Stderr, "Dream cycle failed: %v\n", err) + os.Exit(1) + } + + fmt.Printf("%sDream cycle complete%s (%dms):\n", colorGreen, colorReset, report.Duration.Milliseconds()) + fmt.Printf(" Memories replayed: %d\n", report.MemoriesReplayed) + fmt.Printf(" Associations strengthened: %d\n", report.AssociationsStrengthened) + fmt.Printf(" New associations created: %d\n", report.NewAssociationsCreated) + fmt.Printf(" Noisy memories demoted: %d\n", report.NoisyMemoriesDemoted) +} + +// mcpCommand runs the MCP server on stdin/stdout for AI agent integration. 
+func mcpCommand(configPath string) { + cfg, db, llmProvider, log := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bus := events.NewInMemoryBus(100) + defer func() { _ = bus.Close() }() + + // Create encoding agent so remembered memories get encoded. + // Polling is disabled in MCP mode — each MCP process only encodes via events + // for memories it creates. The daemon is the sole poller. This prevents N + // MCP processes from independently encoding the same unprocessed raw memories. + mcpEncodingCfg := buildEncodingConfig(cfg) + mcpEncodingCfg.DisablePolling = true + encoder := encoding.NewEncodingAgentWithConfig(db, llmProvider, log, mcpEncodingCfg) + if err := encoder.Start(ctx, bus); err != nil { + log.Error("failed to start encoding agent for MCP", "error", err) + } + defer func() { _ = encoder.Stop() }() + + // Create retrieval agent for recall + retriever := retrieval.NewRetrievalAgent(db, llmProvider, buildRetrievalConfig(cfg), log, bus) + + mcpResolver := config.NewProjectResolver(cfg.Projects) + daemonURL := fmt.Sprintf("http://%s:%d", cfg.API.Host, cfg.API.Port) + memDefaults := mcp.MemoryDefaults{ + SalienceGeneral: cfg.MemoryDefaults.InitialSalienceGeneral, + SalienceDecision: cfg.MemoryDefaults.InitialSalienceDecision, + SalienceError: cfg.MemoryDefaults.InitialSalienceError, + SalienceInsight: cfg.MemoryDefaults.InitialSalienceInsight, + SalienceLearning: cfg.MemoryDefaults.InitialSalienceLearning, + SalienceHandoff: cfg.MemoryDefaults.InitialSalienceHandoff, + FeedbackStrengthDelta: cfg.MemoryDefaults.FeedbackStrengthDelta, + FeedbackSalienceBoost: cfg.MemoryDefaults.FeedbackSalienceBoost, + } + server := mcp.NewMCPServer(db, retriever, bus, log, Version, cfg.Coaching.CoachingFile, cfg.Perception.Filesystem.ExcludePatterns, cfg.Perception.Filesystem.MaxContentBytes, mcpResolver, daemonURL, memDefaults) + + // Handle signal for graceful shutdown + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, shutdownSignals()...) + go func() { + <-sigChan + cancel() + }() + + if err := server.Run(ctx); err != nil { + fmt.Fprintf(os.Stderr, "MCP server error: %v\n", err) + os.Exit(1) + } +} + +// autopilotCommand shows what the system has been doing autonomously. +func autopilotCommand(configPath string) { + _, db, _, _ := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + // Read health report + homeDir, _ := os.UserHomeDir() + healthPath := filepath.Join(homeDir, ".mnemonic", "health.json") + data, err := os.ReadFile(healthPath) + + fmt.Println("=== Mnemonic Autopilot Report ===") + fmt.Println() + + if err == nil { + var report orchestrator.HealthReport + if json.Unmarshal(data, &report) == nil { + fmt.Printf("Last report: %s\n", report.Timestamp.Format("2006-01-02 15:04:05")) + fmt.Printf("Uptime: %s\n", report.Uptime) + fmt.Printf("LLM available: %v\n", report.LLMAvailable) + fmt.Printf("Store healthy: %v\n", report.StoreHealthy) + fmt.Printf("Memories: %d\n", report.MemoryCount) + fmt.Printf("Patterns: %d\n", report.PatternCount) + fmt.Printf("Abstractions: %d\n", report.AbstractionCount) + fmt.Printf("Last consolidation: %s\n", report.LastConsolidation) + fmt.Printf("Autonomous actions: %d\n", report.AutonomousActions) + + if len(report.Warnings) > 0 { + fmt.Println() + fmt.Println("Warnings:") + for _, w := range report.Warnings { + fmt.Printf(" - %s\n", w) + } + } + } + } else { + fmt.Println("No health report found. 
Start the daemon to generate one.") + } + + // Show recent autonomous actions + fmt.Println() + fmt.Println("--- Recent Autonomous Actions ---") + actions, err := db.ListMetaObservations(ctx, "autonomous_action", 10) + if err == nil && len(actions) > 0 { + for _, a := range actions { + action := "" + if act, ok := a.Details["action"].(string); ok { + action = act + } + fmt.Printf(" [%s] %s (severity: %s)\n", + a.CreatedAt.Format("2006-01-02 15:04"), action, a.Severity) + } + } else { + fmt.Println(" No autonomous actions recorded yet.") + } + + // Show recent patterns discovered + fmt.Println() + fmt.Println("--- Discovered Patterns ---") + patterns, err := db.ListPatterns(ctx, "", 5) + if err == nil && len(patterns) > 0 { + for _, p := range patterns { + project := "" + if p.Project != "" { + project = fmt.Sprintf(" [%s]", p.Project) + } + fmt.Printf(" %s%s: %s (strength: %.2f, evidence: %d)\n", + p.Title, project, p.Description, p.Strength, len(p.EvidenceIDs)) + } + } else { + fmt.Println(" No patterns discovered yet.") + } + + // Show abstractions + fmt.Println() + fmt.Println("--- Abstractions ---") + hasAbstractions := false + for _, level := range []int{2, 3} { + abs, err := db.ListAbstractions(ctx, level, 5) + if err == nil && len(abs) > 0 { + hasAbstractions = true + for _, a := range abs { + levelLabel := "principle" + if a.Level == 3 { + levelLabel = "axiom" + } + fmt.Printf(" [%s] %s: %s (confidence: %.2f)\n", + levelLabel, a.Title, a.Description, a.Confidence) + } + } + } + if !hasAbstractions { + fmt.Println(" No abstractions generated yet.") + } + + fmt.Println() +} diff --git a/cmd/mnemonic/daemon.go b/cmd/mnemonic/daemon.go new file mode 100644 index 00000000..af180489 --- /dev/null +++ b/cmd/mnemonic/daemon.go @@ -0,0 +1,207 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/appsprout-dev/mnemonic/internal/config" + "github.com/appsprout-dev/mnemonic/internal/daemon" +) + +// startCommand launches the mnemonic daemon in the background. 
+func startCommand(configPath string) { + svc := daemon.NewServiceManager() + + // If platform service is installed, use it + if svc.IsInstalled() { + if running, pid := svc.IsRunning(); running { + fmt.Printf("Mnemonic is already running (%s, PID %d)\n", svc.ServiceName(), pid) + os.Exit(1) + } + fmt.Printf("Starting mnemonic service...\n") + if err := svc.Start(); err != nil { + fmt.Fprintf(os.Stderr, "Error starting service: %v\n", err) + os.Exit(1) + } + // Wait and check if it started + time.Sleep(2 * time.Second) + if running, pid := svc.IsRunning(); running { + cfg, _ := config.Load(configPath) + fmt.Printf("%sMnemonic started%s (%s, PID %d)\n", colorGreen, colorReset, svc.ServiceName(), pid) + if cfg != nil { + fmt.Printf(" Dashboard: http://%s:%d\n", cfg.API.Host, cfg.API.Port) + healthURL := fmt.Sprintf("http://%s:%d/api/v1/health", cfg.API.Host, cfg.API.Port) + checkLLMFromAPI(healthURL, cfg.LLM.Endpoint, cfg.API.Token) + } + fmt.Printf(" Logs: %s\n", daemon.LogPath()) + } else { + fmt.Printf("%sWarning:%s Service started but process not running yet.\n", colorYellow, colorReset) + fmt.Printf(" Check logs: %s\n", daemon.LogPath()) + } + return + } + + // Fall back to PID-file-based daemon start + if running, pid := daemon.IsRunning(); running { + fmt.Printf("Mnemonic is already running (PID %d)\n", pid) + os.Exit(1) + } + + // Validate config can be loaded before starting + cfg, err := config.Load(configPath) + if err != nil { + die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") + } + + // Resolve to absolute config path (so daemon finds it after detach) + absConfigPath, err := filepath.Abs(configPath) + if err != nil { + die(exitGeneral, fmt.Sprintf("resolving config path: %v", err), "") + } + + // Get our binary path + execPath, err := os.Executable() + if err != nil { + die(exitGeneral, fmt.Sprintf("finding executable: %v", err), "") + } + + fmt.Printf("Starting mnemonic daemon...\n") + + pid, err := daemon.Start(execPath, absConfigPath) + if err != nil { + die(exitGeneral, fmt.Sprintf("starting daemon: %v", err), "mnemonic diagnose") + } + + // Wait briefly and verify daemon is healthy via API + time.Sleep(2 * time.Second) + apiURL := fmt.Sprintf("http://%s:%d/api/v1/health", cfg.API.Host, cfg.API.Port) + healthy := false + for i := 0; i < 3; i++ { + resp, err := apiGet(apiURL, cfg.API.Token) + if err == nil { + _ = resp.Body.Close() + if resp.StatusCode == http.StatusOK { + healthy = true + break + } + } + time.Sleep(1 * time.Second) + } + + if healthy { + fmt.Printf("%sMnemonic started%s (PID %d)\n", colorGreen, colorReset, pid) + fmt.Printf(" Dashboard: http://%s:%d\n", cfg.API.Host, cfg.API.Port) + fmt.Printf(" Logs: %s\n", daemon.LogPath()) + fmt.Printf(" PID file: %s\n", daemon.PIDFilePath()) + + // Check if LLM is available via health endpoint + checkLLMFromAPI(apiURL, cfg.LLM.Endpoint, cfg.API.Token) + } else { + fmt.Printf("%sWarning:%s Daemon started (PID %d) but health check failed.\n", colorYellow, colorReset, pid) + fmt.Printf(" Check logs: %s\n", daemon.LogPath()) + } +} + +// stopCommand stops the running mnemonic daemon. 
+func stopCommand() { + svc := daemon.NewServiceManager() + + // Check platform service first + if svc.IsInstalled() { + if running, pid := svc.IsRunning(); running { + fmt.Printf("Stopping mnemonic service (PID %d)...\n", pid) + if err := svc.Stop(); err != nil { + fmt.Fprintf(os.Stderr, "Error stopping service: %v\n", err) + os.Exit(1) + } + // Wait for process to actually exit + time.Sleep(2 * time.Second) + fmt.Printf("%sMnemonic stopped.%s\n", colorGreen, colorReset) + return + } + } + + // Fall back to PID file + running, pid := daemon.IsRunning() + if !running { + fmt.Println("Mnemonic is not running.") + os.Exit(0) + } + + fmt.Printf("Stopping mnemonic daemon (PID %d)...\n", pid) + + if err := daemon.Stop(); err != nil { + fmt.Fprintf(os.Stderr, "Error stopping daemon: %v\n", err) + os.Exit(1) + } + + fmt.Printf("%sMnemonic stopped.%s\n", colorGreen, colorReset) +} + +// restartCommand stops and starts the mnemonic daemon. +func restartCommand(configPath string) { + svc := daemon.NewServiceManager() + + // Check platform service first + if svc.IsInstalled() { + if running, pid := svc.IsRunning(); running { + fmt.Printf("Stopping mnemonic service (PID %d)...\n", pid) + if err := svc.Stop(); err != nil { + fmt.Fprintf(os.Stderr, "Error stopping service: %v\n", err) + os.Exit(1) + } + time.Sleep(2 * time.Second) + } + startCommand(configPath) + return + } + + // Fall back to PID file + if running, pid := daemon.IsRunning(); running { + fmt.Printf("Stopping mnemonic daemon (PID %d)...\n", pid) + if err := daemon.Stop(); err != nil { + fmt.Fprintf(os.Stderr, "Error stopping daemon: %v\n", err) + os.Exit(1) + } + time.Sleep(1 * time.Second) + } + + startCommand(configPath) +} + +// apiGet performs an HTTP GET with optional bearer token auth. +func apiGet(url, token string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } + return http.DefaultClient.Do(req) +} + +// checkLLMFromAPI queries the health endpoint and warns if LLM is unavailable. +func checkLLMFromAPI(healthURL, llmEndpoint, token string) { + resp, err := apiGet(healthURL, token) + if err != nil { + return + } + defer func() { _ = resp.Body.Close() }() + + var health map[string]interface{} + if json.NewDecoder(resp.Body).Decode(&health) != nil { + return + } + + llmAvail, _ := health["llm_available"].(bool) + if !llmAvail { + fmt.Printf("\n %s⚠ LLM provider is not reachable at %s%s\n", colorYellow, llmEndpoint, colorReset) + fmt.Printf(" Memory encoding will not work until the LLM provider is running.\n") + fmt.Printf(" Run 'mnemonic diagnose' for details.\n") + } +} diff --git a/cmd/mnemonic/dedup_cli.go b/cmd/mnemonic/dedup_cli.go new file mode 100644 index 00000000..b45c45ce --- /dev/null +++ b/cmd/mnemonic/dedup_cli.go @@ -0,0 +1,368 @@ +package main + +import ( + "context" + "fmt" + "math" + "os" + "time" + + "github.com/appsprout-dev/mnemonic/internal/agent/agentutil" + "github.com/appsprout-dev/mnemonic/internal/store" +) + +// dedupCommand scans active memories for near-duplicate clusters and archives duplicates. +// With --apply it modifies the DB; without it, it's a dry-run that reports what would change. 
+func dedupCommand(configPath string, dryRun bool) { + cfg, db, _, log := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + threshold := float32(cfg.Encoding.DeduplicationThreshold) + if threshold <= 0 { + threshold = 0.9 + } + + if dryRun { + fmt.Printf("Dedup dry-run (threshold: %.2f). Use --apply to execute.\n\n", threshold) + } else { + fmt.Printf("Dedup (threshold: %.2f). Archiving duplicates...\n\n", threshold) + } + + // Load all active memories in pages + var allMemories []store.Memory + offset := 0 + pageSize := 200 + for { + page, err := db.ListMemories(ctx, "active", pageSize, offset) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to load memories: %v\n", err) + os.Exit(1) + } + allMemories = append(allMemories, page...) + if len(page) < pageSize { + break + } + offset += pageSize + } + + // Filter to memories with embeddings + var withEmbeddings []store.Memory + for _, m := range allMemories { + if len(m.Embedding) > 0 { + withEmbeddings = append(withEmbeddings, m) + } + } + + fmt.Printf("Active memories: %d (%d with embeddings)\n", len(allMemories), len(withEmbeddings)) + + // Union-find clustering: for each pair above threshold, merge clusters + clusterOf := make(map[string]string) // memory ID → cluster representative ID + for i := range withEmbeddings { + clusterOf[withEmbeddings[i].ID] = withEmbeddings[i].ID + } + + // Find root of cluster (with path compression) + var find func(string) string + find = func(id string) string { + if clusterOf[id] != id { + clusterOf[id] = find(clusterOf[id]) + } + return clusterOf[id] + } + + // Union two IDs into the same cluster + union := func(a, b string) { + ra, rb := find(a), find(b) + if ra != rb { + clusterOf[ra] = rb + } + } + + // O(n^2) pairwise comparison — fine for <1000 memories + comparisons := 0 + for i := 0; i < len(withEmbeddings); i++ { + for j := i + 1; j < len(withEmbeddings); j++ { + sim := agentutil.CosineSimilarity(withEmbeddings[i].Embedding, withEmbeddings[j].Embedding) + comparisons++ + if sim >= threshold { + union(withEmbeddings[i].ID, withEmbeddings[j].ID) + } + } + } + + // Build clusters + clusters := make(map[string][]store.Memory) // representative ID → members + for _, m := range withEmbeddings { + root := find(m.ID) + clusters[root] = append(clusters[root], m) + } + + // Filter to clusters with more than 1 member (actual duplicates) + dupClusters := 0 + totalDups := 0 + totalArchived := 0 + totalAssocTransferred := 0 + + for _, members := range clusters { + if len(members) <= 1 { + continue + } + dupClusters++ + totalDups += len(members) + + // Pick survivor: highest salience, then most recently accessed, then newest + survivor := members[0] + for _, m := range members[1:] { + if m.Salience > survivor.Salience { + survivor = m + } else if m.Salience == survivor.Salience && m.LastAccessed.After(survivor.LastAccessed) { + survivor = m + } else if m.Salience == survivor.Salience && m.LastAccessed.Equal(survivor.LastAccessed) && m.CreatedAt.After(survivor.CreatedAt) { + survivor = m + } + } + + fmt.Printf("Cluster (%d members):\n", len(members)) + fmt.Printf(" Survivor: %s (salience=%.2f) %s\n", survivor.ID[:8], survivor.Salience, truncate(survivor.Summary, 60)) + for _, m := range members { + if m.ID == survivor.ID { + continue + } + fmt.Printf(" Archive: %s (salience=%.2f) %s\n", m.ID[:8], m.Salience, truncate(m.Summary, 60)) + + if !dryRun { + // Transfer associations from archived memory to survivor + assocs, err := db.GetAssociations(ctx, m.ID) + if err 
!= nil { + log.Warn("failed to get associations", "memory_id", m.ID, "error", err) + } else { + for _, a := range assocs { + targetID := a.TargetID + if targetID == m.ID { + targetID = a.SourceID + } + if targetID == survivor.ID { + continue // skip self-association + } + newAssoc := store.Association{ + SourceID: survivor.ID, + TargetID: targetID, + Strength: a.Strength, + RelationType: a.RelationType, + CreatedAt: a.CreatedAt, + LastActivated: a.LastActivated, + } + if err := db.CreateAssociation(ctx, newAssoc); err != nil { + // Likely duplicate — ignore + log.Debug("association transfer skipped (likely exists)", "source", survivor.ID[:8], "target", targetID[:8]) + } else { + totalAssocTransferred++ + } + } + } + + // Archive the duplicate + if err := db.UpdateState(ctx, m.ID, "archived"); err != nil { + log.Warn("failed to archive duplicate", "memory_id", m.ID, "error", err) + } else { + totalArchived++ + } + } + } + fmt.Println() + } + + fmt.Printf("Summary:\n") + fmt.Printf(" Comparisons: %d\n", comparisons) + fmt.Printf(" Dup clusters: %d (%d memories)\n", dupClusters, totalDups) + if dryRun { + fmt.Printf(" Would archive: %d memories\n", totalDups-dupClusters) + fmt.Printf("\nRun with --apply to execute.\n") + } else { + fmt.Printf(" Archived: %d memories\n", totalArchived) + fmt.Printf(" Associations: %d transferred\n", totalAssocTransferred) + + // Clean up dangling associations pointing to archived memories + pruned, err := db.PruneOrphanedAssociations(ctx) + if err != nil { + log.Warn("failed to prune orphaned associations", "error", err) + } else { + fmt.Printf(" Orphaned assocs pruned: %d\n", pruned) + } + } +} + +// resetPatternsCommand recalculates pattern strengths using logarithmic scaling +// and merges near-duplicate patterns. Dry-run by default; use --apply to execute. +func resetPatternsCommand(configPath string, dryRun bool) { + _, db, _, log := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + // Load all patterns (no project filter, high limit) + patterns, err := db.ListPatterns(ctx, "", 1000) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to load patterns: %v\n", err) + os.Exit(1) + } + + if dryRun { + fmt.Printf("Pattern reset dry-run. Use --apply to execute.\n\n") + } else { + fmt.Printf("Pattern reset. 
Recalculating strengths and merging duplicates...\n\n") + } + + fmt.Printf("Total patterns: %d\n\n", len(patterns)) + + // Phase 1: Recalculate strengths using logarithmic formula + strengthCeiling := float32(0.95) + strongCeiling := float32(1.0) + strongMinCount := 50 + + fmt.Printf("=== Strength Recalculation ===\n") + fmt.Printf("Formula: 0.5 + 0.03 * log2(1 + evidenceCount)\n") + fmt.Printf("Ceiling: %.2f (%.2f with %d+ evidence)\n\n", strengthCeiling, strongCeiling, strongMinCount) + + recalculated := 0 + for i := range patterns { + p := &patterns[i] + if p.State != "active" { + continue + } + evidenceCount := len(p.EvidenceIDs) + newStrength := float32(0.5) + 0.03*float32(math.Log2(1+float64(evidenceCount))) + ceiling := strengthCeiling + if evidenceCount > strongMinCount { + ceiling = strongCeiling + } + if newStrength > ceiling { + newStrength = ceiling + } + if newStrength != p.Strength { + fmt.Printf(" %-50s evidence=%3d %.2f -> %.2f\n", + truncate(p.Title, 50), evidenceCount, p.Strength, newStrength) + if !dryRun { + p.Strength = newStrength + p.UpdatedAt = time.Now() + if err := db.UpdatePattern(ctx, *p); err != nil { + log.Warn("failed to update pattern strength", "pattern_id", p.ID, "error", err) + } + } + recalculated++ + } + } + fmt.Printf("\nRecalculated: %d patterns\n\n", recalculated) + + // Phase 2: Merge near-duplicate patterns (>0.80 cosine similarity) + const mergeThreshold = float32(0.80) + fmt.Printf("=== Duplicate Pattern Merge (threshold: %.2f) ===\n\n", mergeThreshold) + + // Filter to active patterns with embeddings + var active []int + for i, p := range patterns { + if p.State == "active" && len(p.Embedding) > 0 { + active = append(active, i) + } + } + + // Union-find for pattern clustering + parent := make(map[int]int) + for _, i := range active { + parent[i] = i + } + var findRoot func(int) int + findRoot = func(i int) int { + if parent[i] != i { + parent[i] = findRoot(parent[i]) + } + return parent[i] + } + + for ai := 0; ai < len(active); ai++ { + for bi := ai + 1; bi < len(active); bi++ { + i, j := active[ai], active[bi] + sim := agentutil.CosineSimilarity(patterns[i].Embedding, patterns[j].Embedding) + if sim >= mergeThreshold { + ri, rj := findRoot(i), findRoot(j) + if ri != rj { + parent[ri] = rj + } + } + } + } + + // Build clusters + patternClusters := make(map[int][]int) + for _, i := range active { + root := findRoot(i) + patternClusters[root] = append(patternClusters[root], i) + } + + merged := 0 + for _, members := range patternClusters { + if len(members) <= 1 { + continue + } + + // Pick survivor: most evidence, then highest strength + survivorIdx := members[0] + for _, idx := range members[1:] { + if len(patterns[idx].EvidenceIDs) > len(patterns[survivorIdx].EvidenceIDs) { + survivorIdx = idx + } else if len(patterns[idx].EvidenceIDs) == len(patterns[survivorIdx].EvidenceIDs) && + patterns[idx].Strength > patterns[survivorIdx].Strength { + survivorIdx = idx + } + } + + survivor := &patterns[survivorIdx] + fmt.Printf("Cluster (%d patterns):\n", len(members)) + fmt.Printf(" Survivor: %s (evidence=%d)\n", truncate(survivor.Title, 60), len(survivor.EvidenceIDs)) + + for _, idx := range members { + if idx == survivorIdx { + continue + } + dup := &patterns[idx] + fmt.Printf(" Archive: %s (evidence=%d)\n", truncate(dup.Title, 60), len(dup.EvidenceIDs)) + + if !dryRun { + // Merge evidence IDs into survivor + existingEvidence := make(map[string]bool) + for _, eid := range survivor.EvidenceIDs { + existingEvidence[eid] = true + } + for _, eid := range 
dup.EvidenceIDs { + if !existingEvidence[eid] { + survivor.EvidenceIDs = append(survivor.EvidenceIDs, eid) + } + } + survivor.UpdatedAt = time.Now() + if err := db.UpdatePattern(ctx, *survivor); err != nil { + log.Warn("failed to update survivor pattern", "id", survivor.ID, "error", err) + } + + // Archive the duplicate + dup.State = "archived" + dup.UpdatedAt = time.Now() + if err := db.UpdatePattern(ctx, *dup); err != nil { + log.Warn("failed to archive duplicate pattern", "id", dup.ID, "error", err) + } + } + merged++ + } + fmt.Println() + } + + fmt.Printf("Summary:\n") + fmt.Printf(" Strengths recalculated: %d\n", recalculated) + if dryRun { + fmt.Printf(" Would merge: %d duplicate patterns\n", merged) + fmt.Printf("\nRun with --apply to execute.\n") + } else { + fmt.Printf(" Patterns merged: %d\n", merged) + } +} diff --git a/cmd/mnemonic/diagnose.go b/cmd/mnemonic/diagnose.go new file mode 100644 index 00000000..c858dffb --- /dev/null +++ b/cmd/mnemonic/diagnose.go @@ -0,0 +1,174 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/appsprout-dev/mnemonic/internal/config" + "github.com/appsprout-dev/mnemonic/internal/daemon" + "github.com/appsprout-dev/mnemonic/internal/store/sqlite" +) + +// diagnoseCommand runs a series of health checks and reports PASS/FAIL/WARN. +func diagnoseCommand(configPath string) { + fmt.Printf("%sMnemonic v%s — Diagnostics%s\n\n", colorBold, Version, colorReset) + + passed, warned, failed := 0, 0, 0 + + pass := func(label, detail string) { + fmt.Printf(" %-16s %sPASS%s %s\n", label, colorGreen, colorReset, detail) + passed++ + } + warn := func(label, detail string) { + fmt.Printf(" %-16s %sWARN%s %s\n", label, colorYellow, colorReset, detail) + warned++ + } + fail := func(label, detail string) { + fmt.Printf(" %-16s %sFAIL%s %s\n", label, colorRed, colorReset, detail) + failed++ + } + + // 1. Config + cfg, err := config.Load(configPath) + if err != nil { + fail("Config", fmt.Sprintf("failed to load %s: %v", configPath, err)) + // Can't continue most checks without config + fmt.Printf("\n %s%d passed, %d warnings, %d failed%s\n\n", colorBold, passed, warned, failed, colorReset) + if failed > 0 { + os.Exit(1) + } + return + } + pass("Config", fmt.Sprintf("loaded from %s", configPath)) + + // 2. Data directory + home, homeErr := os.UserHomeDir() + if homeErr != nil { + fail("Data dir", fmt.Sprintf("cannot determine home directory: %v", homeErr)) + } else { + dataPath := filepath.Join(home, ".mnemonic") + info, err := os.Stat(dataPath) + if err != nil { + warn("Data dir", fmt.Sprintf("%s does not exist (will be created on first serve)", dataPath)) + } else if !info.IsDir() { + fail("Data dir", fmt.Sprintf("%s exists but is not a directory", dataPath)) + } else { + // Check writable by creating a temp file + tmpPath := filepath.Join(dataPath, ".diagnose_test") + if err := os.WriteFile(tmpPath, []byte("test"), 0600); err != nil { + fail("Data dir", fmt.Sprintf("%s is not writable: %v", dataPath, err)) + } else { + _ = os.Remove(tmpPath) + pass("Data dir", dataPath) + } + } + } + + // 3. 
Database + var diagDB *sqlite.SQLiteStore + dbInfo, dbErr := os.Stat(cfg.Store.DBPath) + if dbErr != nil { + fail("Database", fmt.Sprintf("file not found: %s", cfg.Store.DBPath)) + } else { + dbSizeMB := float64(dbInfo.Size()) / (1024 * 1024) + + db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) + if err != nil { + fail("Database", fmt.Sprintf("cannot open: %v", err)) + } else { + diagDB = db + defer func() { _ = diagDB.Close() }() + ctx := context.Background() + + // Integrity check + var integrityResult string + row := diagDB.DB().QueryRowContext(ctx, "PRAGMA integrity_check") + if err := row.Scan(&integrityResult); err != nil { + fail("Database", fmt.Sprintf("integrity check error: %v", err)) + } else if integrityResult != "ok" { + fail("Database", fmt.Sprintf("integrity check: %s", integrityResult)) + } else { + stats, err := diagDB.GetStatistics(ctx) + if err != nil { + warn("Database", fmt.Sprintf("integrity OK but stats failed: %v", err)) + } else { + pass("Database", fmt.Sprintf("integrity OK, %d memories (%d active), %.1f MB", + stats.TotalMemories, stats.ActiveMemories, dbSizeMB)) + } + } + } + } + + // 4. LLM provider + llmProvider := newLLMProvider(cfg) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + if err := llmProvider.Health(ctx); err != nil { + fail("LLM", fmt.Sprintf("LLM provider not reachable at %s (%v)", cfg.LLM.Endpoint, err)) + } else { + // Try a quick embedding to verify the model works + _, embErr := llmProvider.Embed(ctx, "test") + if embErr != nil { + warn("LLM", fmt.Sprintf("reachable at %s but embedding failed: %v", cfg.LLM.Endpoint, embErr)) + } else { + pass("LLM", fmt.Sprintf("model %s at %s", cfg.LLM.ChatModel, cfg.LLM.Endpoint)) + } + } + + // 5. Daemon + svc := daemon.NewServiceManager() + if svcRunning, svcPid := svc.IsRunning(); svcRunning { + pass("Daemon", fmt.Sprintf("running (%s, PID %d)", svc.ServiceName(), svcPid)) + } else if running, pid := daemon.IsRunning(); running { + pass("Daemon", fmt.Sprintf("running (PID %d)", pid)) + } else { + warn("Daemon", "not running — use 'mnemonic start' or 'mnemonic serve'") + } + + // 6. Disk space + if homeErr == nil { + dbDir := filepath.Dir(cfg.Store.DBPath) + availBytes, err := diskAvailable(dbDir) + if err == nil { + availGB := float64(availBytes) / (1024 * 1024 * 1024) + if availGB < 1.0 { + fail("Disk", fmt.Sprintf("%.1f GB available on %s — critically low", availGB, dbDir)) + } else if availGB < 5.0 { + warn("Disk", fmt.Sprintf("%.1f GB available on %s", availGB, dbDir)) + } else { + pass("Disk", fmt.Sprintf("%.0f GB available", availGB)) + } + } + // If we can't check disk, just skip silently (platform-specific) + } + + // 7. 
Encoding queue (reuse DB connection from check 3) + if diagDB != nil { + ctx := context.Background() + var unprocessed int + row := diagDB.DB().QueryRowContext(ctx, "SELECT COUNT(*) FROM raw_memories WHERE processed = 0") + if row.Scan(&unprocessed) == nil { + if unprocessed > 500 { + warn("Encoding queue", fmt.Sprintf("%d unprocessed raw memories (LLM may be falling behind)", unprocessed)) + } else { + pass("Encoding queue", fmt.Sprintf("%d unprocessed", unprocessed)) + } + } + } + + // Summary + fmt.Println() + if failed > 0 { + fmt.Printf(" %s%d passed, %d warnings, %d failed%s\n\n", colorRed, passed, warned, failed, colorReset) + os.Exit(1) + } else if warned > 0 { + fmt.Printf(" %s%d passed, %d warnings%s\n\n", colorYellow, passed, warned, colorReset) + } else { + fmt.Printf(" %sAll %d checks passed%s\n\n", colorGreen, passed, colorReset) + } +} diff --git a/cmd/mnemonic/export.go b/cmd/mnemonic/export.go new file mode 100644 index 00000000..84b3f3fd --- /dev/null +++ b/cmd/mnemonic/export.go @@ -0,0 +1,143 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/appsprout-dev/mnemonic/internal/backup" +) + +// exportCommand exports the memory store to a file. +func exportCommand(configPath string, args []string) { + cfg, db, _, _ := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + // Parse flags + format := "json" + outputPath := "" + for i := 1; i < len(args); i++ { + switch args[i] { + case "--format": + if i+1 < len(args) { + format = args[i+1] + i++ + } + case "--output": + if i+1 < len(args) { + outputPath = args[i+1] + i++ + } + } + } + + // Default output path + if outputPath == "" { + backupDir, err := backup.EnsureBackupDir() + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating backup directory: %v\n", err) + os.Exit(1) + } + timestamp := time.Now().Format("2006-01-02_150405") + outputPath = filepath.Join(backupDir, fmt.Sprintf("export_%s.%s", timestamp, format)) + } + + switch format { + case "json": + fmt.Printf("Exporting to JSON: %s\n", outputPath) + if err := backup.ExportJSON(ctx, db, outputPath); err != nil { + fmt.Fprintf(os.Stderr, "Export failed: %v\n", err) + os.Exit(1) + } + case "sqlite": + fmt.Printf("Exporting SQLite copy: %s\n", outputPath) + if err := backup.ExportSQLite(ctx, cfg.Store.DBPath, outputPath); err != nil { + fmt.Fprintf(os.Stderr, "Export failed: %v\n", err) + os.Exit(1) + } + default: + fmt.Fprintf(os.Stderr, "Unknown format: %s (supported: json, sqlite)\n", format) + os.Exit(1) + } + + // Get file size + if info, err := os.Stat(outputPath); err == nil { + fmt.Printf("%sExport complete.%s (%.1f KB)\n", colorGreen, colorReset, float64(info.Size())/1024) + } else { + fmt.Printf("%sExport complete.%s\n", colorGreen, colorReset) + } +} + +// importCommand imports memories from a JSON export file. 
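+// A hypothetical example invocation (the file name follows the default
+// produced by 'mnemonic export --format json'):
+//
+//	mnemonic import export_2026-01-02_150405.json --mode merge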
+func importCommand(configPath, filePath string, args []string) { + _, db, _, _ := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + // Parse mode + mode := backup.ModeMerge + for i := 2; i < len(args); i++ { + if args[i] == "--mode" && i+1 < len(args) { + switch args[i+1] { + case "merge": + mode = backup.ModeMerge + case "replace": + mode = backup.ModeReplace + default: + fmt.Fprintf(os.Stderr, "Unknown mode: %s (supported: merge, replace)\n", args[i+1]) + os.Exit(1) + } + i++ + } + } + + fmt.Printf("Importing from %s (mode: %s)...\n", filePath, mode) + + result, err := backup.ImportFromJSON(ctx, db, filePath, mode) + if err != nil { + fmt.Fprintf(os.Stderr, "Import failed: %v\n", err) + os.Exit(1) + } + + fmt.Printf("%sImport complete%s (%dms):\n", colorGreen, colorReset, result.Duration.Milliseconds()) + fmt.Printf(" Memories imported: %d\n", result.MemoriesImported) + fmt.Printf(" Associations imported: %d\n", result.AssociationsImported) + fmt.Printf(" Raw memories imported: %d\n", result.RawMemoriesImported) + fmt.Printf(" Skipped duplicates: %d\n", result.SkippedDuplicates) + if len(result.Errors) > 0 { + fmt.Printf(" %sWarnings:%s %d\n", colorYellow, colorReset, len(result.Errors)) + } +} + +// backupCommand creates a timestamped backup with retention. +func backupCommand(configPath string) { + _, db, _, _ := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + backupDir, err := backup.EnsureBackupDir() + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating backup directory: %v\n", err) + os.Exit(1) + } + + fmt.Printf("Backing up to %s...\n", backupDir) + + backupPath, err := backup.BackupWithRetention(ctx, db, backupDir, 5) + if err != nil { + fmt.Fprintf(os.Stderr, "Backup failed: %v\n", err) + os.Exit(1) + } + + if info, err := os.Stat(backupPath); err == nil { + fmt.Printf("%sBackup complete.%s %s (%.1f KB)\n", colorGreen, colorReset, filepath.Base(backupPath), float64(info.Size())/1024) + } else { + fmt.Printf("%sBackup complete.%s %s\n", colorGreen, colorReset, filepath.Base(backupPath)) + } +} diff --git a/cmd/mnemonic/insights.go b/cmd/mnemonic/insights.go new file mode 100644 index 00000000..e2432dc7 --- /dev/null +++ b/cmd/mnemonic/insights.go @@ -0,0 +1,89 @@ +package main + +import ( + "context" + "fmt" + "os" + "strings" + "time" +) + +// insightsCommand displays recent metacognition observations. +func insightsCommand(configPath string) { + _, db, _, _ := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + observations, err := db.ListMetaObservations(ctx, "", 20) + if err != nil { + fmt.Fprintf(os.Stderr, "Error fetching insights: %v\n", err) + os.Exit(1) + } + + if len(observations) == 0 { + fmt.Println("No insights available yet. 
The metacognition agent runs periodically to analyze memory health.")
+		fmt.Println("Run manually with: mnemonic meta-cycle")
+		return
+	}
+
+	fmt.Printf("%sMnemonic Insights%s\n\n", colorBold, colorReset)
+
+	for _, obs := range observations {
+		// Severity color
+		severityColor := colorGray
+		switch obs.Severity {
+		case "warning":
+			severityColor = colorYellow
+		case "critical":
+			severityColor = colorRed
+		case "info":
+			severityColor = colorCyan
+		}
+
+		// Format observation type (guard against an empty type so the
+		// title-casing slice below cannot panic)
+		typeLabel := strings.ReplaceAll(obs.ObservationType, "_", " ")
+		if typeLabel != "" {
+			typeLabel = strings.ToUpper(typeLabel[:1]) + typeLabel[1:]
+		}
+
+		ago := time.Since(obs.CreatedAt).Round(time.Minute)
+		timeStr := formatDuration(ago)
+		if timeStr != "just now" {
+			timeStr += " ago"
+		}
+		fmt.Printf(" %s[%s]%s %s%s%s (%s)\n",
+			severityColor, strings.ToUpper(obs.Severity), colorReset,
+			colorBold, typeLabel, colorReset,
+			timeStr)
+
+		// Print details
+		for key, val := range obs.Details {
+			keyLabel := strings.ReplaceAll(key, "_", " ")
+			fmt.Printf(" %s: %s\n", keyLabel, formatDetailValue(val))
+		}
+		fmt.Println()
+	}
+}
+
+// formatDetailValue renders a detail value in a human-friendly way.
+func formatDetailValue(val interface{}) string {
+	switch v := val.(type) {
+	case float64:
+		if v == float64(int64(v)) {
+			return fmt.Sprintf("%d", int64(v))
+		}
+		return fmt.Sprintf("%.1f%%", v*100)
+	case map[string]interface{}:
+		parts := []string{}
+		for k, mv := range v {
+			switch n := mv.(type) {
+			case float64:
+				parts = append(parts, fmt.Sprintf("%s=%d", k, int64(n)))
+			default:
+				parts = append(parts, fmt.Sprintf("%s=%v", k, mv))
+			}
+		}
+		return strings.Join(parts, ", ")
+	default:
+		return fmt.Sprintf("%v", val)
+	}
+}
diff --git a/cmd/mnemonic/install.go b/cmd/mnemonic/install.go
new file mode 100644
index 00000000..b2cf304d
--- /dev/null
+++ b/cmd/mnemonic/install.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/appsprout-dev/mnemonic/internal/config"
+	"github.com/appsprout-dev/mnemonic/internal/daemon"
+)
+
+// installCommand registers mnemonic as a platform service (launchd on macOS, systemd on Linux).
+func installCommand(configPath string) {
+	svc := daemon.NewServiceManager()
+
+	// Validate config
+	_, err := config.Load(configPath)
+	if err != nil {
+		die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose")
+	}
+
+	// Resolve paths
+	absConfigPath, err := filepath.Abs(configPath)
+	if err != nil {
+		die(exitGeneral, fmt.Sprintf("resolving config path: %v", err), "")
+	}
+
+	execPath, err := os.Executable()
+	if err != nil {
+		die(exitGeneral, fmt.Sprintf("finding executable: %v", err), "")
+	}
+
+	if err := svc.Install(execPath, absConfigPath); err != nil {
+		die(exitPermission, fmt.Sprintf("installing service: %v", err), "check system permissions")
+	}
+
+	fmt.Printf("%sService installed (%s).%s\n\n", colorGreen, svc.ServiceName(), colorReset)
+	fmt.Printf(" Binary: %s\n", execPath)
+	fmt.Printf(" Config: %s\n", absConfigPath)
+	fmt.Printf("\nMnemonic will now start automatically on login.\n")
+	fmt.Printf("To start immediately:\n")
+	fmt.Printf(" mnemonic start\n\n")
+	fmt.Printf("To check status:\n")
+	fmt.Printf(" mnemonic status\n\n")
+	fmt.Printf("To uninstall:\n")
+	fmt.Printf(" mnemonic uninstall\n")
+}
+
+// uninstallCommand removes the platform service registration.
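+// Note: this only removes the service registration (launchd on macOS,
+// systemd on Linux); it does not touch the database or other stored data.
+// Use 'mnemonic purge' to delete all data.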
+func uninstallCommand() { + svc := daemon.NewServiceManager() + + if err := svc.Uninstall(); err != nil { + fmt.Fprintf(os.Stderr, "Error uninstalling service: %v\n", err) + os.Exit(1) + } + + fmt.Printf("%sService uninstalled (%s).%s\n", colorGreen, svc.ServiceName(), colorReset) + fmt.Printf("Mnemonic will no longer start automatically on login.\n") +} diff --git a/cmd/mnemonic/main.go b/cmd/mnemonic/main.go index cb4229f5..532b6300 100644 --- a/cmd/mnemonic/main.go +++ b/cmd/mnemonic/main.go @@ -1,59 +1,9 @@ package main import ( - "bytes" - "context" - "crypto/rand" - "encoding/hex" - "encoding/json" "flag" "fmt" - "log/slog" - "math" - "net/http" "os" - "os/exec" - "os/signal" - "path/filepath" - "runtime" - "strings" - "syscall" - "time" - - "github.com/appsprout-dev/mnemonic/internal/agent/agentutil" - "github.com/appsprout-dev/mnemonic/internal/config" - "github.com/appsprout-dev/mnemonic/internal/daemon" - "github.com/appsprout-dev/mnemonic/internal/events" - "github.com/appsprout-dev/mnemonic/internal/llm" - "github.com/appsprout-dev/mnemonic/internal/llm/llamacpp" - "github.com/appsprout-dev/mnemonic/internal/logger" - "github.com/appsprout-dev/mnemonic/internal/store/sqlite" - "github.com/appsprout-dev/mnemonic/internal/watcher" - - "github.com/appsprout-dev/mnemonic/internal/agent/abstraction" - "github.com/appsprout-dev/mnemonic/internal/agent/consolidation" - "github.com/appsprout-dev/mnemonic/internal/agent/dreaming" - "github.com/appsprout-dev/mnemonic/internal/agent/encoding" - "github.com/appsprout-dev/mnemonic/internal/agent/episoding" - "github.com/appsprout-dev/mnemonic/internal/agent/metacognition" - "github.com/appsprout-dev/mnemonic/internal/agent/orchestrator" - "github.com/appsprout-dev/mnemonic/internal/agent/perception" - "github.com/appsprout-dev/mnemonic/internal/agent/reactor" - "github.com/appsprout-dev/mnemonic/internal/agent/retrieval" - "github.com/appsprout-dev/mnemonic/internal/api" - "github.com/appsprout-dev/mnemonic/internal/api/routes" - "github.com/appsprout-dev/mnemonic/internal/backup" - "github.com/appsprout-dev/mnemonic/internal/mcp" - "github.com/appsprout-dev/mnemonic/internal/store" - "github.com/appsprout-dev/mnemonic/internal/updater" - - clipwatcher "github.com/appsprout-dev/mnemonic/internal/watcher/clipboard" - fswatcher "github.com/appsprout-dev/mnemonic/internal/watcher/filesystem" - gitwatcher "github.com/appsprout-dev/mnemonic/internal/watcher/git" - termwatcher "github.com/appsprout-dev/mnemonic/internal/watcher/terminal" - - "github.com/google/uuid" - "github.com/gorilla/websocket" ) var Version = "dev" @@ -210,2645 +160,73 @@ func main() { } } -// ============================================================================ -// Daemon Management Commands (start / stop / restart) -// ============================================================================ - -// startCommand launches the mnemonic daemon in the background. 
-func startCommand(configPath string) { - svc := daemon.NewServiceManager() - - // If platform service is installed, use it - if svc.IsInstalled() { - if running, pid := svc.IsRunning(); running { - fmt.Printf("Mnemonic is already running (%s, PID %d)\n", svc.ServiceName(), pid) - os.Exit(1) - } - fmt.Printf("Starting mnemonic service...\n") - if err := svc.Start(); err != nil { - fmt.Fprintf(os.Stderr, "Error starting service: %v\n", err) - os.Exit(1) - } - // Wait and check if it started - time.Sleep(2 * time.Second) - if running, pid := svc.IsRunning(); running { - cfg, _ := config.Load(configPath) - fmt.Printf("%sMnemonic started%s (%s, PID %d)\n", colorGreen, colorReset, svc.ServiceName(), pid) - if cfg != nil { - fmt.Printf(" Dashboard: http://%s:%d\n", cfg.API.Host, cfg.API.Port) - healthURL := fmt.Sprintf("http://%s:%d/api/v1/health", cfg.API.Host, cfg.API.Port) - checkLLMFromAPI(healthURL, cfg.LLM.Endpoint, cfg.API.Token) - } - fmt.Printf(" Logs: %s\n", daemon.LogPath()) - } else { - fmt.Printf("%sWarning:%s Service started but process not running yet.\n", colorYellow, colorReset) - fmt.Printf(" Check logs: %s\n", daemon.LogPath()) - } - return - } - - // Fall back to PID-file-based daemon start - if running, pid := daemon.IsRunning(); running { - fmt.Printf("Mnemonic is already running (PID %d)\n", pid) - os.Exit(1) - } - - // Validate config can be loaded before starting - cfg, err := config.Load(configPath) - if err != nil { - die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") - } - - // Resolve to absolute config path (so daemon finds it after detach) - absConfigPath, err := filepath.Abs(configPath) - if err != nil { - die(exitGeneral, fmt.Sprintf("resolving config path: %v", err), "") - } - - // Get our binary path - execPath, err := os.Executable() - if err != nil { - die(exitGeneral, fmt.Sprintf("finding executable: %v", err), "") - } - - fmt.Printf("Starting mnemonic daemon...\n") - - pid, err := daemon.Start(execPath, absConfigPath) - if err != nil { - die(exitGeneral, fmt.Sprintf("starting daemon: %v", err), "mnemonic diagnose") - } - - // Wait briefly and verify daemon is healthy via API - time.Sleep(2 * time.Second) - apiURL := fmt.Sprintf("http://%s:%d/api/v1/health", cfg.API.Host, cfg.API.Port) - healthy := false - for i := 0; i < 3; i++ { - resp, err := apiGet(apiURL, cfg.API.Token) - if err == nil { - _ = resp.Body.Close() - if resp.StatusCode == http.StatusOK { - healthy = true - break - } - } - time.Sleep(1 * time.Second) - } - - if healthy { - fmt.Printf("%sMnemonic started%s (PID %d)\n", colorGreen, colorReset, pid) - fmt.Printf(" Dashboard: http://%s:%d\n", cfg.API.Host, cfg.API.Port) - fmt.Printf(" Logs: %s\n", daemon.LogPath()) - fmt.Printf(" PID file: %s\n", daemon.PIDFilePath()) - - // Check if LLM is available via health endpoint - checkLLMFromAPI(apiURL, cfg.LLM.Endpoint, cfg.API.Token) - } else { - fmt.Printf("%sWarning:%s Daemon started (PID %d) but health check failed.\n", colorYellow, colorReset, pid) - fmt.Printf(" Check logs: %s\n", daemon.LogPath()) - } -} - -// generateTokenCommand generates a random API token and prints it. 
-// ============================================================================ -// Update Commands (check-update / update) -// ============================================================================ - -func checkUpdateCommand() { - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - - fmt.Printf("Checking for updates...\n") - info, err := updater.CheckForUpdate(ctx, Version) - if err != nil { - die(exitNetwork, "Update check failed", err.Error()) - } - - if info.UpdateAvailable { - fmt.Printf("\n Current: v%s\n", info.CurrentVersion) - fmt.Printf(" Latest: %sv%s%s\n\n", colorGreen, info.LatestVersion, colorReset) - fmt.Printf(" Run %smnemonic update%s to install.\n", colorBold, colorReset) - fmt.Printf(" Release: %s\n", info.ReleaseURL) - } else { - fmt.Printf("\n %sYou're up to date!%s (v%s)\n", colorGreen, colorReset, info.CurrentVersion) - } -} - -func updateCommand() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - fmt.Printf("Checking for updates...\n") - info, err := updater.CheckForUpdate(ctx, Version) - if err != nil { - die(exitNetwork, "Update check failed", err.Error()) - } - - if !info.UpdateAvailable { - fmt.Printf("%sAlready up to date%s (v%s)\n", colorGreen, colorReset, info.CurrentVersion) - return - } - - fmt.Printf("Downloading v%s...\n", info.LatestVersion) - result, err := updater.PerformUpdate(ctx, info) - if err != nil { - die(exitGeneral, "Update failed", err.Error()) - } - - fmt.Printf("%sUpdated: v%s → v%s%s\n", colorGreen, result.PreviousVersion, result.NewVersion, colorReset) - - // Restart daemon if it's running - svc := daemon.NewServiceManager() - if svc.IsInstalled() { - running, _ := svc.IsRunning() - if running { - fmt.Printf("Restarting daemon...\n") - if err := svc.Stop(); err != nil { - fmt.Fprintf(os.Stderr, "%sWarning:%s failed to stop daemon: %v\n", colorYellow, colorReset, err) - fmt.Printf("Restart manually: mnemonic restart\n") - return - } - time.Sleep(1 * time.Second) - if err := svc.Start(); err != nil { - fmt.Fprintf(os.Stderr, "%sWarning:%s failed to start daemon: %v\n", colorYellow, colorReset, err) - fmt.Printf("Start manually: mnemonic start\n") - return - } - fmt.Printf("%sDaemon restarted with v%s%s\n", colorGreen, result.NewVersion, colorReset) - } - } -} - -func generateTokenCommand() { - b := make([]byte, 32) - if _, err := rand.Read(b); err != nil { - fmt.Fprintf(os.Stderr, "Error generating token: %v\n", err) - os.Exit(1) - } - token := hex.EncodeToString(b) - fmt.Printf("Generated API token:\n\n %s\n\n", token) - fmt.Printf("Add this to your config.yaml:\n\n api:\n token: \"%s\"\n\n", token) - fmt.Printf("Then set this environment variable for CLI tools:\n\n export MNEMONIC_API_TOKEN=\"%s\"\n", token) -} - -// apiGet performs an HTTP GET with optional bearer token auth. -func apiGet(url, token string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - if token != "" { - req.Header.Set("Authorization", "Bearer "+token) - } - return http.DefaultClient.Do(req) -} - -// checkLLMFromAPI queries the health endpoint and warns if LLM is unavailable. 
-func checkLLMFromAPI(healthURL, llmEndpoint, token string) { - resp, err := apiGet(healthURL, token) - if err != nil { - return - } - defer func() { _ = resp.Body.Close() }() - - var health map[string]interface{} - if json.NewDecoder(resp.Body).Decode(&health) != nil { - return - } - - llmAvail, _ := health["llm_available"].(bool) - if !llmAvail { - fmt.Printf("\n %s⚠ LLM provider is not reachable at %s%s\n", colorYellow, llmEndpoint, colorReset) - fmt.Printf(" Memory encoding will not work until the LLM provider is running.\n") - fmt.Printf(" Run 'mnemonic diagnose' for details.\n") - } -} - -// stopCommand stops the running mnemonic daemon. -func stopCommand() { - svc := daemon.NewServiceManager() - - // Check platform service first - if svc.IsInstalled() { - if running, pid := svc.IsRunning(); running { - fmt.Printf("Stopping mnemonic service (PID %d)...\n", pid) - if err := svc.Stop(); err != nil { - fmt.Fprintf(os.Stderr, "Error stopping service: %v\n", err) - os.Exit(1) - } - // Wait for process to actually exit - time.Sleep(2 * time.Second) - fmt.Printf("%sMnemonic stopped.%s\n", colorGreen, colorReset) - return - } - } - - // Fall back to PID file - running, pid := daemon.IsRunning() - if !running { - fmt.Println("Mnemonic is not running.") - os.Exit(0) - } - - fmt.Printf("Stopping mnemonic daemon (PID %d)...\n", pid) - - if err := daemon.Stop(); err != nil { - fmt.Fprintf(os.Stderr, "Error stopping daemon: %v\n", err) - os.Exit(1) - } - - fmt.Printf("%sMnemonic stopped.%s\n", colorGreen, colorReset) -} - -// restartCommand stops and starts the mnemonic daemon. -func restartCommand(configPath string) { - svc := daemon.NewServiceManager() - - // Check platform service first - if svc.IsInstalled() { - if running, pid := svc.IsRunning(); running { - fmt.Printf("Stopping mnemonic service (PID %d)...\n", pid) - if err := svc.Stop(); err != nil { - fmt.Fprintf(os.Stderr, "Error stopping service: %v\n", err) - os.Exit(1) - } - time.Sleep(2 * time.Second) - } - startCommand(configPath) - return - } - - // Fall back to PID file - if running, pid := daemon.IsRunning(); running { - fmt.Printf("Stopping mnemonic daemon (PID %d)...\n", pid) - if err := daemon.Stop(); err != nil { - fmt.Fprintf(os.Stderr, "Error stopping daemon: %v\n", err) - os.Exit(1) - } - time.Sleep(1 * time.Second) - } - - startCommand(configPath) -} - -// ============================================================================ -// Watch Command (live event tail) -// ============================================================================ - -// watchCommand connects to the daemon's WebSocket and streams live events. -func watchCommand(configPath string) { - cfg, err := config.Load(configPath) - if err != nil { - die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") - } - - wsURL := fmt.Sprintf("ws://%s:%d/ws", cfg.API.Host, cfg.API.Port) - - fmt.Printf("%sMnemonic Live Events%s — connecting to %s\n", colorBold, colorReset, wsURL) - fmt.Printf("Press Ctrl+C to stop.\n\n") - - // Connect to WebSocket - conn, _, err := websocket.DefaultDialer.Dial(wsURL, nil) - if err != nil { - die(exitNetwork, fmt.Sprintf("connecting to daemon: %v", err), "mnemonic start") - } - defer func() { _ = conn.Close() }() - - // Handle Ctrl+C - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, shutdownSignals()...) 
- - go func() { - <-sigChan - fmt.Printf("\n%sStopping event watch.%s\n", colorGray, colorReset) - _ = conn.Close() - os.Exit(0) - }() - - // Read and display events - for { - _, message, err := conn.ReadMessage() - if err != nil { - if websocket.IsCloseError(err, websocket.CloseNormalClosure) { - fmt.Println("Connection closed.") - } else { - fmt.Fprintf(os.Stderr, "\nWebSocket disconnected: %v\n", err) - } - return - } - - formatWatchEvent(message) - } -} - -// formatWatchEvent formats and prints a WebSocket event with colors. -func formatWatchEvent(data []byte) { - var evt map[string]interface{} - if err := json.Unmarshal(data, &evt); err != nil { - // Raw text event - ts := time.Now().Format("15:04:05") - fmt.Printf("%s%s%s %s\n", colorGray, ts, colorReset, string(data)) - return - } - - eventType, _ := evt["type"].(string) - ts := time.Now().Format("15:04:05") - - switch eventType { - case "raw_memory_created": - source, _ := evt["source"].(string) - id, _ := evt["id"].(string) - shortID := truncID(id) - fmt.Printf("%s%s%s %s▶ PERCEIVED%s [%s] %s\n", - colorGray, ts, colorReset, colorCyan, colorReset, source, shortID) - - case "memory_encoded": - id, _ := evt["id"].(string) - shortID := truncID(id) - fmt.Printf("%s%s%s %s▶ ENCODED%s %s\n", - colorGray, ts, colorReset, colorGreen, colorReset, shortID) - - case "consolidation_completed": - processed, _ := evt["memories_processed"].(float64) - decayed, _ := evt["memories_decayed"].(float64) - merged, _ := evt["merged_clusters"].(float64) - pruned, _ := evt["associations_pruned"].(float64) - durationMs, _ := evt["duration_ms"].(float64) - fmt.Printf("%s%s%s %s▶ CONSOLIDATED%s processed=%d decayed=%d merged=%d pruned=%d (%dms)\n", - colorGray, ts, colorReset, colorYellow, colorReset, - int(processed), int(decayed), int(merged), int(pruned), int(durationMs)) - - case "query_executed": - query, _ := evt["query"].(string) - results, _ := evt["result_count"].(float64) - took, _ := evt["took_ms"].(float64) - fmt.Printf("%s%s%s %s▶ QUERY%s \"%s\" → %d results (%dms)\n", - colorGray, ts, colorReset, colorBlue, colorReset, - query, int(results), int(took)) - - case "dream_cycle_completed": - replayed, _ := evt["memories_replayed"].(float64) - strengthened, _ := evt["associations_strengthened"].(float64) - newAssoc, _ := evt["new_associations_created"].(float64) - demoted, _ := evt["noisy_memories_demoted"].(float64) - durationMs, _ := evt["duration_ms"].(float64) - fmt.Printf("%s%s%s %s▶ DREAMED%s replayed=%d strengthened=%d new_assoc=%d demoted=%d (%dms)\n", - colorGray, ts, colorReset, colorCyan, colorReset, - int(replayed), int(strengthened), int(newAssoc), int(demoted), int(durationMs)) - - case "meta_cycle_completed": - observations, _ := evt["observations_logged"].(float64) - fmt.Printf("%s%s%s %s▶ META%s observations=%d\n", - colorGray, ts, colorReset, colorCyan, colorReset, int(observations)) - - default: - // Generic event - fmt.Printf("%s%s%s %s▶ %s%s %s\n", - colorGray, ts, colorReset, colorGray, eventType, colorReset, - string(data)) - } -} - -// truncID shortens a UUID for display. -func truncID(id string) string { - if len(id) > 8 { - return id[:8] - } - return id -} - -// ============================================================================ -// Enhanced Status Command -// ============================================================================ - -// statusCommand displays comprehensive system status. 
-func statusCommand(configPath string) { - svc := daemon.NewServiceManager() - - cfg, err := config.Load(configPath) - if err != nil { - // Even without config, show daemon state - fmt.Printf("%sMnemonic v%s Status%s\n\n", colorBold, Version, colorReset) - if svcRunning, svcPid := svc.IsRunning(); svcRunning { - fmt.Printf(" Daemon: %srunning%s (%s, PID %d)\n", colorGreen, colorReset, svc.ServiceName(), svcPid) - } else if running, pid := daemon.IsRunning(); running { - fmt.Printf(" Daemon: %srunning%s (PID %d)\n", colorGreen, colorReset, pid) - } else { - fmt.Printf(" Daemon: %sstopped%s\n", colorRed, colorReset) - } - fmt.Fprintf(os.Stderr, " (Config error: %v)\n", err) - return - } - - fmt.Printf("%sMnemonic v%s Status%s\n\n", colorBold, Version, colorReset) - - // Daemon state — check platform service first, then PID file - running := false - pid := 0 - mode := "" - if svcRunning, svcPid := svc.IsRunning(); svcRunning { - running, pid, mode = true, svcPid, fmt.Sprintf(" (%s)", svc.ServiceName()) - } else if pidRunning, pidPid := daemon.IsRunning(); pidRunning { - running, pid = true, pidPid - } - if running { - fmt.Printf(" Daemon: %srunning%s%s (PID %d)\n", colorGreen, colorReset, mode, pid) - } else { - fmt.Printf(" Daemon: %sstopped%s\n", colorRed, colorReset) - } - - // Try to get live status from the API - apiBase := fmt.Sprintf("http://%s:%d/api/v1", cfg.API.Host, cfg.API.Port) - apiReachable := false - - // Health check - healthResp, err := apiGet(apiBase+"/health", cfg.API.Token) - if err == nil { - defer func() { _ = healthResp.Body.Close() }() - if healthResp.StatusCode == http.StatusOK { - apiReachable = true - var health map[string]interface{} - if json.NewDecoder(healthResp.Body).Decode(&health) == nil { - llmStatus, _ := health["llm"].(string) - storeStatus, _ := health["store"].(string) - - llmColor := colorGreen - if llmStatus != "ok" { - llmColor = colorRed - } - storeColor := colorGreen - if storeStatus != "ok" { - storeColor = colorRed - } - - fmt.Printf(" API: %slistening%s on %s:%d\n", colorGreen, colorReset, cfg.API.Host, cfg.API.Port) - fmt.Printf(" LLM: %s%s%s (%s)\n", llmColor, llmStatus, colorReset, cfg.LLM.ChatModel) - fmt.Printf(" Store: %s%s%s\n", storeColor, storeStatus, colorReset) - } - } - } - - if !apiReachable { - fmt.Printf(" API: %sunreachable%s\n", colorRed, colorReset) - } - - // Memory stats — from API if available, else direct DB - fmt.Printf("\n %sMemory Store%s\n", colorBold, colorReset) - - if apiReachable { - statsResp, err := apiGet(apiBase+"/stats", cfg.API.Token) - if err == nil { - defer func() { _ = statsResp.Body.Close() }() - var data map[string]interface{} - if json.NewDecoder(statsResp.Body).Decode(&data) == nil { - s, _ := data["store"].(map[string]interface{}) - if s == nil { - s = data - } - total := intVal(s, "total_memories") - active := intVal(s, "active_memories") - fading := intVal(s, "fading_memories") - archived := intVal(s, "archived_memories") - merged := intVal(s, "merged_memories") - assoc := intVal(s, "total_associations") - dbSize := intVal(s, "storage_size_bytes") - - fmt.Printf(" Total: %d\n", total) - fmt.Printf(" Active: %s%d%s\n", colorGreen, active, colorReset) - fmt.Printf(" Fading: %s%d%s\n", colorYellow, fading, colorReset) - fmt.Printf(" Archived: %s%d%s\n", colorGray, archived, colorReset) - fmt.Printf(" Merged: %d\n", merged) - fmt.Printf(" Associations: %d\n", assoc) - fmt.Printf(" DB size: %.1f KB\n", float64(dbSize)/1024) - } - } - } else { - // Fall back to direct DB access - db, err := 
sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) - if err == nil { - defer func() { _ = db.Close() }() - ctx := context.Background() - stats, err := db.GetStatistics(ctx) - if err == nil { - fmt.Printf(" Total: %d\n", stats.TotalMemories) - fmt.Printf(" Active: %s%d%s\n", colorGreen, stats.ActiveMemories, colorReset) - fmt.Printf(" Fading: %s%d%s\n", colorYellow, stats.FadingMemories, colorReset) - fmt.Printf(" Archived: %s%d%s\n", colorGray, stats.ArchivedMemories, colorReset) - fmt.Printf(" Merged: %d\n", stats.MergedMemories) - fmt.Printf(" Associations: %d\n", stats.TotalAssociations) - fmt.Printf(" DB size: %.1f KB\n", float64(stats.StorageSizeBytes)/1024) - } - } - } - - // Encoding queue depth — direct DB query - fmt.Printf("\n %sEncoding Queue%s\n", colorBold, colorReset) - { - db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) - if err == nil { - defer func() { _ = db.Close() }() - ctx := context.Background() - var unprocessed int - row := db.DB().QueryRowContext(ctx, "SELECT COUNT(*) FROM raw_memories WHERE processed = 0") - if row.Scan(&unprocessed) == nil { - queueColor := colorGreen - queueNote := "" - if unprocessed > 500 { - queueColor = colorRed - queueNote = " (LLM may be down — run 'mnemonic diagnose')" - } else if unprocessed > 100 { - queueColor = colorYellow - queueNote = " (processing)" - } - fmt.Printf(" Unprocessed: %s%d%s%s\n", queueColor, unprocessed, colorReset, queueNote) - } - } - } - - // Consolidation status — check last consolidation from DB - fmt.Printf("\n %sConsolidation%s\n", colorBold, colorReset) - if cfg.Consolidation.Enabled { - fmt.Printf(" Enabled: yes (every %s)\n", cfg.Consolidation.IntervalRaw) - db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) - if err == nil { - defer func() { _ = db.Close() }() - lastConsolidation := getLastConsolidation(db) - if lastConsolidation != "" { - fmt.Printf(" Last run: %s\n", lastConsolidation) - } else { - fmt.Printf(" Last run: %snever%s\n", colorGray, colorReset) - } - } - } else { - fmt.Printf(" Enabled: no\n") - } - - // Perception config - fmt.Printf("\n %sPerception%s\n", colorBold, colorReset) - if cfg.Perception.Enabled { - if cfg.Perception.Filesystem.Enabled { - fmt.Printf(" Filesystem: %senabled%s (%d dirs)\n", colorGreen, colorReset, len(cfg.Perception.Filesystem.WatchDirs)) - } else { - fmt.Printf(" Filesystem: %sdisabled%s\n", colorGray, colorReset) - } - if cfg.Perception.Terminal.Enabled { - fmt.Printf(" Terminal: %senabled%s (poll %ds)\n", colorGreen, colorReset, cfg.Perception.Terminal.PollIntervalSec) - } else { - fmt.Printf(" Terminal: %sdisabled%s\n", colorGray, colorReset) - } - if cfg.Perception.Clipboard.Enabled { - fmt.Printf(" Clipboard: %senabled%s\n", colorGreen, colorReset) - } else { - fmt.Printf(" Clipboard: %sdisabled%s\n", colorGray, colorReset) - } - } else { - fmt.Printf(" All perception: %sdisabled%s\n", colorGray, colorReset) - } - - // Paths - fmt.Printf("\n %sPaths%s\n", colorBold, colorReset) - fmt.Printf(" Config: %s\n", configPath) - fmt.Printf(" Database: %s\n", cfg.Store.DBPath) - fmt.Printf(" Log: %s\n", daemon.LogPath()) - fmt.Printf(" PID: %s\n", daemon.PIDFilePath()) - fmt.Printf(" Dashboard: http://%s:%d\n", cfg.API.Host, cfg.API.Port) - fmt.Println() -} - -// intVal safely extracts an int from a JSON map. 
-func intVal(m map[string]interface{}, key string) int { - if v, ok := m[key]; ok { - switch n := v.(type) { - case float64: - return int(n) - case int: - return n - } - } - return 0 -} - -// getLastConsolidation queries for the last consolidation timestamp. -func getLastConsolidation(db *sqlite.SQLiteStore) string { - ctx := context.Background() - record, err := db.GetLastConsolidation(ctx) - if err != nil { - return "" - } - if record.ID == "" { - return "" - } - ago := time.Since(record.EndTime).Round(time.Minute) - return fmt.Sprintf("%s (%s ago, %d memories, %dms)", record.EndTime.Format("Jan 2 15:04"), formatDuration(ago), record.MemoriesProcessed, record.DurationMs) -} - -// formatDuration formats a duration as human-readable. -func formatDuration(d time.Duration) string { - if d < time.Minute { - return "just now" - } - if d < time.Hour { - mins := int(d.Minutes()) - return fmt.Sprintf("%dm", mins) - } - if d < 24*time.Hour { - hours := int(d.Hours()) - return fmt.Sprintf("%dh", hours) - } - days := int(d.Hours() / 24) - return fmt.Sprintf("%dd", days) -} - -// ============================================================================ -// Diagnose -// ============================================================================ - -// diagnoseCommand runs a series of health checks and reports PASS/FAIL/WARN. -func diagnoseCommand(configPath string) { - fmt.Printf("%sMnemonic v%s — Diagnostics%s\n\n", colorBold, Version, colorReset) - - passed, warned, failed := 0, 0, 0 - - pass := func(label, detail string) { - fmt.Printf(" %-16s %sPASS%s %s\n", label, colorGreen, colorReset, detail) - passed++ - } - warn := func(label, detail string) { - fmt.Printf(" %-16s %sWARN%s %s\n", label, colorYellow, colorReset, detail) - warned++ - } - fail := func(label, detail string) { - fmt.Printf(" %-16s %sFAIL%s %s\n", label, colorRed, colorReset, detail) - failed++ - } - - // 1. Config - cfg, err := config.Load(configPath) - if err != nil { - fail("Config", fmt.Sprintf("failed to load %s: %v", configPath, err)) - // Can't continue most checks without config - fmt.Printf("\n %s%d passed, %d warnings, %d failed%s\n\n", colorBold, passed, warned, failed, colorReset) - if failed > 0 { - os.Exit(1) - } - return - } - pass("Config", fmt.Sprintf("loaded from %s", configPath)) +// printUsage prints the command usage. +func printUsage() { + usage := `mnemonic v%s - A semantic memory system daemon - // 2. Data directory - home, homeErr := os.UserHomeDir() - if homeErr != nil { - fail("Data dir", fmt.Sprintf("cannot determine home directory: %v", homeErr)) - } else { - dataPath := filepath.Join(home, ".mnemonic") - info, err := os.Stat(dataPath) - if err != nil { - warn("Data dir", fmt.Sprintf("%s does not exist (will be created on first serve)", dataPath)) - } else if !info.IsDir() { - fail("Data dir", fmt.Sprintf("%s exists but is not a directory", dataPath)) - } else { - // Check writable by creating a temp file - tmpPath := filepath.Join(dataPath, ".diagnose_test") - if err := os.WriteFile(tmpPath, []byte("test"), 0600); err != nil { - fail("Data dir", fmt.Sprintf("%s is not writable: %v", dataPath, err)) - } else { - _ = os.Remove(tmpPath) - pass("Data dir", dataPath) - } - } - } +USAGE: + mnemonic [OPTIONS] [COMMAND] - // 3. 
Database - var diagDB *sqlite.SQLiteStore - dbInfo, dbErr := os.Stat(cfg.Store.DBPath) - if dbErr != nil { - fail("Database", fmt.Sprintf("file not found: %s", cfg.Store.DBPath)) - } else { - dbSizeMB := float64(dbInfo.Size()) / (1024 * 1024) +OPTIONS: + --config PATH Path to config.yaml (default: "config.yaml") + --help Show this help message - db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) - if err != nil { - fail("Database", fmt.Sprintf("cannot open: %v", err)) - } else { - diagDB = db - defer func() { _ = diagDB.Close() }() - ctx := context.Background() +DAEMON COMMANDS: + start Start the mnemonic daemon (background) + stop Stop the running daemon + restart Restart the daemon + serve Run in foreground (for debugging) - // Integrity check - var integrityResult string - row := diagDB.DB().QueryRowContext(ctx, "PRAGMA integrity_check") - if err := row.Scan(&integrityResult); err != nil { - fail("Database", fmt.Sprintf("integrity check error: %v", err)) - } else if integrityResult != "ok" { - fail("Database", fmt.Sprintf("integrity check: %s", integrityResult)) - } else { - stats, err := diagDB.GetStatistics(ctx) - if err != nil { - warn("Database", fmt.Sprintf("integrity OK but stats failed: %v", err)) - } else { - pass("Database", fmt.Sprintf("integrity OK, %d memories (%d active), %.1f MB", - stats.TotalMemories, stats.ActiveMemories, dbSizeMB)) - } - } - } - } +MEMORY COMMANDS: + remember TEXT Store text in memory + recall QUERY Retrieve memories matching query + consolidate Run memory consolidation cycle - // 4. LLM provider - llmProvider := newLLMProvider(cfg) +DATA MANAGEMENT: + ingest DIR Bulk-ingest a directory (--dry-run, --project NAME) + export Export memories (--format json|sqlite, --output path) + import FILE Import from JSON export (--mode merge|replace) + backup Timestamped backup with retention (keeps last 5) + restore FILE Restore database from a SQLite backup file + cleanup Remove noise: mark excluded-path raw events as processed (--yes) + purge Stop daemon and delete all data (fresh start) + insights Show metacognition observations (memory health) + meta-cycle Run a single metacognition analysis cycle + dream-cycle Run a single dream replay cycle - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() +AI AGENT INTEGRATION: + mcp Run MCP server on stdin/stdout (for AI agents) - if err := llmProvider.Health(ctx); err != nil { - fail("LLM", fmt.Sprintf("LLM provider not reachable at %s (%v)", cfg.LLM.Endpoint, err)) - } else { - // Try a quick embedding to verify the model works - _, embErr := llmProvider.Embed(ctx, "test") - if embErr != nil { - warn("LLM", fmt.Sprintf("reachable at %s but embedding failed: %v", cfg.LLM.Endpoint, embErr)) - } else { - pass("LLM", fmt.Sprintf("model %s at %s", cfg.LLM.ChatModel, cfg.LLM.Endpoint)) - } - } +MONITORING COMMANDS: + status Show comprehensive system status + diagnose Run health checks (config, DB, LLM, disk) + watch Live stream of daemon events - // 5. Daemon - svc := daemon.NewServiceManager() - if svcRunning, svcPid := svc.IsRunning(); svcRunning { - pass("Daemon", fmt.Sprintf("running (%s, PID %d)", svc.ServiceName(), svcPid)) - } else if running, pid := daemon.IsRunning(); running { - pass("Daemon", fmt.Sprintf("running (PID %d)", pid)) - } else { - warn("Daemon", "not running — use 'mnemonic start' or 'mnemonic serve'") - } +UPDATE COMMANDS: + check-update Check if a newer version is available + update Download and install the latest version - // 6. 
Disk space - if homeErr == nil { - dbDir := filepath.Dir(cfg.Store.DBPath) - availBytes, err := diskAvailable(dbDir) - if err == nil { - availGB := float64(availBytes) / (1024 * 1024 * 1024) - if availGB < 1.0 { - fail("Disk", fmt.Sprintf("%.1f GB available on %s — critically low", availGB, dbDir)) - } else if availGB < 5.0 { - warn("Disk", fmt.Sprintf("%.1f GB available on %s", availGB, dbDir)) - } else { - pass("Disk", fmt.Sprintf("%.0f GB available", availGB)) - } - } - // If we can't check disk, just skip silently (platform-specific) - } +SETUP COMMANDS: + install Install as system service (auto-start on login) + uninstall Remove system service + generate-token Generate a random API authentication token + version Show version - // 7. Encoding queue (reuse DB connection from check 3) - if diagDB != nil { - ctx := context.Background() - var unprocessed int - row := diagDB.DB().QueryRowContext(ctx, "SELECT COUNT(*) FROM raw_memories WHERE processed = 0") - if row.Scan(&unprocessed) == nil { - if unprocessed > 500 { - warn("Encoding queue", fmt.Sprintf("%d unprocessed raw memories (LLM may be falling behind)", unprocessed)) - } else { - pass("Encoding queue", fmt.Sprintf("%d unprocessed", unprocessed)) - } - } - } - - // Summary - fmt.Println() - if failed > 0 { - fmt.Printf(" %s%d passed, %d warnings, %d failed%s\n\n", colorRed, passed, warned, failed, colorReset) - os.Exit(1) - } else if warned > 0 { - fmt.Printf(" %s%d passed, %d warnings%s\n\n", colorYellow, passed, warned, colorReset) - } else { - fmt.Printf(" %sAll %d checks passed%s\n\n", colorGreen, passed, colorReset) - } -} - -// ============================================================================ -// Install / Uninstall (platform service) -// ============================================================================ - -// installCommand registers mnemonic as a platform service (launchd on macOS, systemd on Linux). -func installCommand(configPath string) { - svc := daemon.NewServiceManager() - - // Validate config - _, err := config.Load(configPath) - if err != nil { - die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") - } - - // Resolve paths - absConfigPath, err := filepath.Abs(configPath) - if err != nil { - die(exitGeneral, fmt.Sprintf("resolving config path: %v", err), "") - } - - execPath, err := os.Executable() - if err != nil { - die(exitGeneral, fmt.Sprintf("finding executable: %v", err), "") - } - - if err := svc.Install(execPath, absConfigPath); err != nil { - die(exitPermission, fmt.Sprintf("installing service: %v", err), "check system permissions") - } - - fmt.Printf("%sService installed (%s).%s\n\n", colorGreen, svc.ServiceName(), colorReset) - fmt.Printf(" Binary: %s\n", execPath) - fmt.Printf(" Config: %s\n", absConfigPath) - fmt.Printf("\nMnemonic will now start automatically on login.\n") - fmt.Printf("To start immediately:\n") - fmt.Printf(" mnemonic start\n\n") - fmt.Printf("To check status:\n") - fmt.Printf(" mnemonic status\n\n") - fmt.Printf("To uninstall:\n") - fmt.Printf(" mnemonic uninstall\n") -} - -// uninstallCommand removes the platform service registration. 
-func uninstallCommand() { - svc := daemon.NewServiceManager() - - if err := svc.Uninstall(); err != nil { - fmt.Fprintf(os.Stderr, "Error uninstalling service: %v\n", err) - os.Exit(1) - } - - fmt.Printf("%sService uninstalled (%s).%s\n", colorGreen, svc.ServiceName(), colorReset) - fmt.Printf("Mnemonic will no longer start automatically on login.\n") -} - -// ============================================================================ -// Serve Command (the actual daemon) -// ============================================================================ - -// startAgentWebServer starts the Python WebSocket agent server as a child process. -// Returns the started Cmd and a channel that receives the Wait() result when the -// process exits. The caller must use the channel instead of calling cmd.Wait() -// directly, since the background monitor goroutine owns the single Wait() call. -// Returns (nil, nil) if disabled or failed to start. -func startAgentWebServer(cfg *config.Config, log *slog.Logger) (*exec.Cmd, <-chan error) { - if !cfg.AgentSDK.Enabled || cfg.AgentSDK.EvolutionDir == "" { - return nil, nil - } - - port := cfg.AgentSDK.WebPort - if port == 0 { - port = 9998 - } - - // SDK directory: evolution_dir is sdk/agent/evolution, so sdk/ is two levels up. - sdkDir := filepath.Dir(filepath.Dir(cfg.AgentSDK.EvolutionDir)) - - // Determine python binary: prefer explicit config, then venv Python (has - // all SDK deps installed), then uv, then system python3/python. - pythonBin := cfg.AgentSDK.PythonBin - if pythonBin == "" { - // Venv layout differs by platform: bin/python3 (Unix) vs Scripts/python.exe (Windows) - venvPython := filepath.Join(sdkDir, ".venv", "bin", "python3") - if runtime.GOOS == "windows" { - venvPython = filepath.Join(sdkDir, ".venv", "Scripts", "python.exe") - } - if _, err := os.Stat(venvPython); err == nil { - pythonBin = venvPython - } else if uvPath, err := exec.LookPath("uv"); err == nil { - pythonBin = uvPath - } else if py3, err := exec.LookPath("python3"); err == nil { - pythonBin = py3 - } else if py, err := exec.LookPath("python"); err == nil { - // Windows typically has "python" not "python3" - pythonBin = py - } else { - log.Error("cannot find python3 or uv to start agent web server") - return nil, nil - } - } - - // Build command arguments. - var args []string - if strings.HasSuffix(filepath.Base(pythonBin), "uv") { - args = []string{"run", "python", "-m", "agent.web"} - } else { - args = []string{"-m", "agent.web"} - } - - // Resolve mnemonic binary and config paths relative to project root. - projectRoot := filepath.Dir(sdkDir) - binaryName := "mnemonic" - if runtime.GOOS == "windows" { - binaryName = "mnemonic.exe" - } - args = append(args, - "--port", fmt.Sprintf("%d", port), - "--mnemonic-config", filepath.Join(projectRoot, "config.yaml"), - "--mnemonic-binary", filepath.Join(projectRoot, "bin", binaryName), - ) - - cmd := exec.Command(pythonBin, args...) - cmd.Dir = sdkDir - - // Capture stderr so missing-dependency tracebacks don't pollute the console. - var stderrBuf bytes.Buffer - cmd.Stdout = os.Stdout - cmd.Stderr = &stderrBuf - - // Strip CLAUDECODE env var so the bundled Claude CLI doesn't refuse - // to start (nested session detection). 
- env := os.Environ() - filtered := env[:0] - for _, e := range env { - if !strings.HasPrefix(e, "CLAUDECODE=") { - filtered = append(filtered, e) - } - } - cmd.Env = filtered - - if err := cmd.Start(); err != nil { - log.Error("failed to start agent web server", "error", err, "python_bin", pythonBin) - return nil, nil - } - - log.Info("agent web server started", "pid", cmd.Process.Pid, "port", port, "sdk_dir", sdkDir) - - // Monitor the process in background — if it exits quickly, log a clean warning - // instead of dumping a raw Python traceback. This goroutine owns the single - // cmd.Wait() call; the done channel lets the shutdown path wait for exit - // without calling Wait() a second time (which would race). - done := make(chan error, 1) - go func() { - err := cmd.Wait() - if err != nil { - stderr := strings.TrimSpace(stderrBuf.String()) - if strings.Contains(stderr, "ModuleNotFoundError") || strings.Contains(stderr, "No module named") { - log.Warn("agent web server exited: missing Python dependency — install SDK requirements to enable", - "hint", "cd sdk && pip install -r requirements.txt") - } else { - log.Warn("agent web server exited unexpectedly", "error", err, "stderr", stderr) - } - } - done <- err - }() - - return cmd, done -} - -// serveCommand runs the mnemonic daemon. -func serveCommand(configPath string) { - // If running as a Windows Service, delegate to the service handler. - if daemon.IsWindowsService() { - execPath, _ := os.Executable() - if err := daemon.RunAsService(execPath, configPath); err != nil { - die(exitGeneral, fmt.Sprintf("running as Windows service: %v", err), "") - } - return - } - - // Load configuration - cfg, err := config.Load(configPath) - if err != nil { - die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") - } - - // Check config file permissions - if warn := config.WarnPermissions(configPath); warn != "" { - fmt.Fprintf(os.Stderr, "Warning: %s\n", warn) - } - - // Build project resolver from config - projectResolver := config.NewProjectResolver(cfg.Projects) - - // Initialize logger - log, err := logger.New(logger.Config{ - Level: cfg.Logging.Level, - Format: cfg.Logging.Format, - File: cfg.Logging.File, - }) - if err != nil { - die(exitConfig, fmt.Sprintf("initializing logger: %v", err), "check logging config in config.yaml") - } - slog.SetDefault(log) - - // Clean up leftover .old binary from a previous Windows update - if err := updater.CleanupOldBinary(); err != nil { - log.Warn("failed to clean up old binary after update", "error", err) - } - - // Create data directory if it doesn't exist - if err := cfg.EnsureDataDir(); err != nil { - die(exitPermission, fmt.Sprintf("creating data directory: %v", err), "check permissions on ~/.mnemonic/") - } - - // Pre-migration safety backup (only if DB exists AND schema is outdated) - if _, statErr := os.Stat(cfg.Store.DBPath); statErr == nil { - currentVer, verErr := backup.ReadSchemaVersion(cfg.Store.DBPath) - if verErr != nil { - log.Warn("could not read schema version, will back up defensively", "error", verErr) - currentVer = -1 // force backup - } - if currentVer < sqlite.SchemaVersion { - backupDir, bdErr := backup.EnsureBackupDir() - if bdErr != nil { - log.Warn("could not create backup directory for pre-migration backup", "error", bdErr) - } else { - bkPath, bkErr := backup.BackupSQLiteFile(cfg.Store.DBPath, backupDir) - if bkErr != nil { - log.Warn("pre-migration backup failed", "error", bkErr) - } else if bkPath != "" { - log.Info("pre-migration backup created", 
"path", bkPath) - } - if pruneErr := backup.PruneOldBackups(backupDir, 3); pruneErr != nil { - log.Warn("failed to prune old backups", "error", pruneErr) - } - } - } else { - log.Debug("schema is current, skipping pre-migration backup") - } - } - - // Open SQLite store - memStore, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) - if err != nil { - die(exitDatabase, fmt.Sprintf("opening database %s: %v", cfg.Store.DBPath, err), "mnemonic diagnose") - } - - // Run integrity check on startup - intCtx, intCancel := context.WithTimeout(context.Background(), 30*time.Second) - if intErr := memStore.CheckIntegrity(intCtx); intErr != nil { - log.Error("database integrity check failed", "error", intErr) - fmt.Fprintf(os.Stderr, "\n%s✗ DATABASE CORRUPTION DETECTED%s\n", colorRed, colorReset) - fmt.Fprintf(os.Stderr, " %v\n", intErr) - fmt.Fprintf(os.Stderr, " A pre-migration backup was saved. Use 'mnemonic restore ' to recover.\n\n") - } else { - log.Info("database integrity check passed") - } - intCancel() - - // Check available disk space - dbDir := filepath.Dir(cfg.Store.DBPath) - if availBytes, diskErr := diskAvailable(dbDir); diskErr == nil { - availMB := availBytes / (1024 * 1024) - if availMB < 100 { - log.Error("critically low disk space", "available_mb", availMB, "path", dbDir) - fmt.Fprintf(os.Stderr, "\n%s✗ CRITICALLY LOW DISK SPACE: %d MB available%s\n", colorRed, availMB, colorReset) - fmt.Fprintf(os.Stderr, " Database writes may fail. Free up disk space before continuing.\n\n") - } else if availMB < 500 { - log.Warn("low disk space", "available_mb", availMB, "path", dbDir) - fmt.Fprintf(os.Stderr, "\n%s⚠ Low disk space: %d MB available%s\n", colorYellow, availMB, colorReset) - } - } - - // Create LLM provider - llmProvider := newLLMProvider(cfg) - - // Check for embedding model drift - embModel := cfg.LLM.EmbeddingModel - if cfg.LLM.Provider == "embedded" && cfg.LLM.Embedded.EmbedModelFile != "" { - embModel = cfg.LLM.Embedded.EmbedModelFile - } - if embModel != "" { - metaCtx, metaCancel := context.WithTimeout(context.Background(), 5*time.Second) - prevModel, _ := memStore.GetMeta(metaCtx, "embedding_model") - metaCancel() - - if prevModel != "" && prevModel != embModel { - log.Warn("embedding model changed", "previous", prevModel, "current", embModel) - fmt.Fprintf(os.Stderr, "\n%s⚠ Embedding model changed: %s → %s%s\n", colorYellow, prevModel, embModel, colorReset) - fmt.Fprintf(os.Stderr, " Existing semantic search may return degraded results.\n") - fmt.Fprintf(os.Stderr, " Old embeddings are from a different vector space.\n\n") - } - - metaCtx2, metaCancel2 := context.WithTimeout(context.Background(), 5*time.Second) - _ = memStore.SetMeta(metaCtx2, "embedding_model", embModel) - metaCancel2() - } - - // Detect version changes and create a memory for release awareness - if Version != "" { - verCtx, verCancel := context.WithTimeout(context.Background(), 5*time.Second) - prevVersion, _ := memStore.GetMeta(verCtx, "daemon_version") - verCancel() - - if prevVersion != "" && prevVersion != Version { - log.Info("version changed", "previous", prevVersion, "current", Version) - raw := store.RawMemory{ - ID: uuid.New().String(), - Source: "system", - Type: "version_change", - Content: fmt.Sprintf("Mnemonic updated from %s to %s", prevVersion, Version), - Timestamp: time.Now(), - Project: "mnemonic", - InitialSalience: 0.7, - } - writeCtx, writeCancel := context.WithTimeout(context.Background(), 5*time.Second) - if err := memStore.WriteRaw(writeCtx, raw); err != nil { 
- log.Warn("failed to record version change", "error", err) - } else { - log.Info("recorded version change memory", "from", prevVersion, "to", Version) - } - writeCancel() - } - - setCtx, setCancel := context.WithTimeout(context.Background(), 5*time.Second) - _ = memStore.SetMeta(setCtx, "daemon_version", Version) - setCancel() - } - - // Create event bus - bus := events.NewInMemoryBus(bufferSize) - defer func() { _ = bus.Close() }() - - // Check LLM health (warn loudly if unavailable, don't fail startup) - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(cfg.LLM.TimeoutSec)*time.Second) - if err := llmProvider.Health(ctx); err != nil { - log.Warn("LLM provider unavailable at startup", "endpoint", cfg.LLM.Endpoint, "error", err) - fmt.Fprintf(os.Stderr, "\n%s⚠ WARNING: LLM provider is not reachable at %s%s\n", colorYellow, cfg.LLM.Endpoint, colorReset) - fmt.Fprintf(os.Stderr, " Memory encoding will not work until the LLM provider is running.\n") - fmt.Fprintf(os.Stderr, " Raw observations will queue and be processed once the LLM provider is available.\n") - fmt.Fprintf(os.Stderr, " Run 'mnemonic diagnose' for a full health check.\n\n") - } - cancel() - - // Log startup info - embCount, embLoadTime := memStore.EmbeddingIndexStats() - log.Info("mnemonic daemon starting", - "version", Version, - "config_path", configPath, - "db_path", cfg.Store.DBPath, - "llm_endpoint", cfg.LLM.Endpoint, - "llm_chat_model", cfg.LLM.ChatModel, - "llm_embedding_model", cfg.LLM.EmbeddingModel, - "embedding_index_size", embCount, - "embedding_index_load_ms", embLoadTime.Milliseconds(), - ) - if embCount > 50000 { - log.Warn("large embedding index — consider ANN index for better performance", - "count", embCount, "load_ms", embLoadTime.Milliseconds()) - } - - // Create a root context for all agents - rootCtx, rootCancel := context.WithCancel(context.Background()) - defer rootCancel() - - // Instrumented provider wrapper — gives each agent its own usage tracking. - // If training data capture is enabled, wrap with TrainingCaptureProvider too. 
- modelLabel := cfg.LLM.ChatModel - if cfg.LLM.Provider == "embedded" && cfg.LLM.Embedded.ChatModelFile != "" { - modelLabel = cfg.LLM.Embedded.ChatModelFile - } - wrap := func(caller string) llm.Provider { - var p llm.Provider = llm.NewInstrumentedProvider(llmProvider, memStore, caller, modelLabel) - if cfg.Training.CaptureEnabled && cfg.Training.CaptureDir != "" { - p = llm.NewTrainingCaptureProvider(p, caller, cfg.Training.CaptureDir) - } - return p - } - - // --- Start episoding agent (groups raw events into episodes) --- - var episodingAgent *episoding.EpisodingAgent - if cfg.Episoding.Enabled { - pollingInterval := time.Duration(cfg.Episoding.PollingIntervalSec) * time.Second - if pollingInterval <= 0 { - pollingInterval = 10 * time.Second - } - episodingCfg := episoding.EpisodingConfig{ - EpisodeWindowSizeMin: cfg.Episoding.EpisodeWindowSizeMin, - MinEventsPerEpisode: cfg.Episoding.MinEventsPerEpisode, - PollingInterval: pollingInterval, - StartupLookback: cfg.Episoding.StartupLookback, - DefaultSalience: cfg.Episoding.DefaultSalience, - } - episodingAgent = episoding.NewEpisodingAgent(memStore, wrap("episoding"), log, episodingCfg) - if err := episodingAgent.Start(rootCtx, bus); err != nil { - log.Error("failed to start episoding agent", "error", err) - } else { - log.Info("episoding agent started") - } - } - - // --- Start encoding agent --- - var encoder *encoding.EncodingAgent - if cfg.Encoding.Enabled { - encoder = encoding.NewEncodingAgentWithConfig(memStore, wrap("encoding"), log, buildEncodingConfig(cfg)) - if err := encoder.Start(rootCtx, bus); err != nil { - log.Error("failed to start encoding agent", "error", err) - } else { - log.Info("encoding agent started") - } - } - - // --- Build watchers based on config --- - var watchers []watcher.Watcher - var percAgent *perception.PerceptionAgent - - if cfg.Perception.Enabled { - if cfg.Perception.Filesystem.Enabled { - // Auto-detect noisy app directories and merge with configured exclusions - autoExclusions := fswatcher.DetectNoisyApps(log) - allExclusions := cfg.Perception.Filesystem.ExcludePatterns - for _, pattern := range autoExclusions { - if !fswatcher.MatchesExcludePattern(pattern, allExclusions) { - allExclusions = append(allExclusions, pattern) - } - } - - fsw, err := fswatcher.NewFilesystemWatcher(fswatcher.Config{ - WatchDirs: cfg.Perception.Filesystem.WatchDirs, - ExcludePatterns: allExclusions, - SensitivePatterns: cfg.Perception.Filesystem.SensitivePatterns, - MaxContentBytes: cfg.Perception.Filesystem.MaxContentBytes, - MaxWatches: cfg.Perception.Filesystem.MaxWatches, - ShallowDepth: cfg.Perception.Filesystem.ShallowDepth, - PollIntervalSec: cfg.Perception.Filesystem.PollIntervalSec, - PromotionThreshold: cfg.Perception.Filesystem.PromotionThreshold, - DemotionTimeoutMin: cfg.Perception.Filesystem.DemotionTimeoutMin, - }, log) - if err != nil { - log.Error("failed to create filesystem watcher", "error", err) - } else { - watchers = append(watchers, fsw) - log.Info("filesystem watcher configured", "dirs", cfg.Perception.Filesystem.WatchDirs) - } - } - - if cfg.Perception.Terminal.Enabled { - tw, err := termwatcher.NewTerminalWatcher(termwatcher.Config{ - Shell: cfg.Perception.Terminal.Shell, - PollIntervalSec: cfg.Perception.Terminal.PollIntervalSec, - ExcludePatterns: cfg.Perception.Terminal.ExcludePatterns, - }, log) - if err != nil { - log.Error("failed to create terminal watcher", "error", err) - } else { - watchers = append(watchers, tw) - log.Info("terminal watcher configured", "shell", 
cfg.Perception.Terminal.Shell) - } - } - - if cfg.Perception.Clipboard.Enabled { - cw, err := clipwatcher.NewClipboardWatcher(clipwatcher.Config{ - PollIntervalSec: cfg.Perception.Clipboard.PollIntervalSec, - MaxContentBytes: cfg.Perception.Clipboard.MaxContentBytes, - }, log) - if err != nil { - log.Error("failed to create clipboard watcher", "error", err) - } else { - watchers = append(watchers, cw) - log.Info("clipboard watcher configured") - } - } - - if cfg.Perception.Git.Enabled { - gw, err := gitwatcher.NewGitWatcher(gitwatcher.Config{ - WatchDirs: cfg.Perception.Filesystem.WatchDirs, - PollIntervalSec: cfg.Perception.Git.PollIntervalSec, - MaxRepoDepth: cfg.Perception.Git.MaxRepoDepth, - }, log) - if err != nil { - log.Warn("git watcher not available", "error", err) - } else { - watchers = append(watchers, gw) - log.Info("git watcher configured") - } - } - - // --- Start perception agent --- - if len(watchers) > 0 { - percAgent = perception.NewPerceptionAgent( - watchers, - memStore, - wrap("perception"), - perception.PerceptionConfig{ - HeuristicConfig: perception.HeuristicConfig{ - MinContentLength: cfg.Perception.Heuristics.MinContentLength, - MaxContentLength: cfg.Perception.Heuristics.MaxContentLength, - FrequencyThreshold: cfg.Perception.Heuristics.FrequencyThreshold, - FrequencyWindowMin: cfg.Perception.Heuristics.FrequencyWindowMin, - PassScore: float32(cfg.Perception.HeuristicPassScore), - BatchEditWindowSec: cfg.Perception.BatchEditWindowSec, - BatchEditThreshold: cfg.Perception.BatchEditThreshold, - RecallBoostMax: float32(cfg.Perception.RecallBoostMax), - RecallBoostMinutes: cfg.Perception.RecallBoostWindowMin, - ExtraIgnoredPatterns: cfg.Perception.Heuristics.ExtraIgnoredPatterns, - ExtraLockfileNames: cfg.Perception.Heuristics.ExtraLockfileNames, - ExtraAppInternalDirs: cfg.Perception.Heuristics.ExtraAppInternalDirs, - ExtraSensitiveNames: cfg.Perception.Heuristics.ExtraSensitiveNames, - ExtraSourceExtensions: cfg.Perception.Heuristics.ExtraSourceExtensions, - ExtraTrivialCommands: cfg.Perception.Heuristics.ExtraTrivialCommands, - ExtraHighSignalCommands: cfg.Perception.Heuristics.ExtraHighSignalCommands, - ExtraCodeIndicators: cfg.Perception.Heuristics.ExtraCodeIndicators, - ExtraHighSignalKeywords: cfg.Perception.Heuristics.ExtraHighSignalKeywords, - ExtraMediumKeywords: cfg.Perception.Heuristics.ExtraMediumKeywords, - ExtraLowKeywords: cfg.Perception.Heuristics.ExtraLowKeywords, - Scoring: perception.ScoringConfig{ - BaseFilesystem: cfg.Perception.Scoring.BaseFilesystem, - BaseTerminal: cfg.Perception.Scoring.BaseTerminal, - BaseClipboard: cfg.Perception.Scoring.BaseClipboard, - BaseMCP: cfg.Perception.Scoring.BaseMCP, - BoostErrorLog: cfg.Perception.Scoring.BoostErrorLog, - BoostConfig: cfg.Perception.Scoring.BoostConfig, - BoostSourceCode: cfg.Perception.Scoring.BoostSourceCode, - BoostCommand: cfg.Perception.Scoring.BoostCommand, - BoostCodeSnippet: cfg.Perception.Scoring.BoostCodeSnippet, - KeywordHigh: cfg.Perception.Scoring.KeywordHigh, - KeywordMedium: cfg.Perception.Scoring.KeywordMedium, - KeywordLow: cfg.Perception.Scoring.KeywordLow, - }, - }, - LLMGatingEnabled: cfg.Perception.LLMGatingEnabled, - LearnedExclusionsPath: cfg.Perception.LearnedExclusionsPath, - ProjectResolver: projectResolver, - ContentDedupTTLSec: cfg.Perception.ContentDedupTTLSec, - GitOpCooldownSec: cfg.Perception.GitOpCooldownSec, - MaxRawContentLen: cfg.Perception.MaxRawContentLen, - LLMGateSnippetLen: cfg.Perception.LLMGateSnippetLen, - LLMGateTimeoutSec: 
cfg.Perception.LLMGateTimeoutSec, - RejectionThreshold: cfg.Perception.RejectionThreshold, - RejectionWindowMin: cfg.Perception.RejectionWindowMin, - RejectionMaxPromoted: cfg.Perception.RejectionMaxPromoted, - }, - log, - ) - if err := percAgent.Start(rootCtx, bus); err != nil { - log.Error("failed to start perception agent", "error", err) - } else { - log.Info("perception agent started", "watchers", len(watchers)) - } - } - } - - // --- Create retrieval agent for API queries --- - retriever := retrieval.NewRetrievalAgent(memStore, wrap("retrieval"), buildRetrievalConfig(cfg), log, bus) - - // --- Start consolidation agent --- - var consolidator *consolidation.ConsolidationAgent - if cfg.Consolidation.Enabled { - consolidator = consolidation.NewConsolidationAgent(memStore, wrap("consolidation"), toConsolidationConfig(cfg), log) - - if err := consolidator.Start(rootCtx, bus); err != nil { - log.Error("failed to start consolidation agent", "error", err) - } else { - log.Info("consolidation agent started", "interval", cfg.Consolidation.Interval) - } - } - - // --- Start metacognition agent --- - var metaAgent *metacognition.MetacognitionAgent - if cfg.Metacognition.Enabled { - metaAgent = metacognition.NewMetacognitionAgent(memStore, wrap("metacognition"), metacognition.MetacognitionConfig{ - Interval: cfg.Metacognition.Interval, - StartupDelay: time.Duration(cfg.Metacognition.StartupDelaySec) * time.Second, - ReflectionLookback: cfg.Metacognition.ReflectionLookback, - DeadMemoryWindow: cfg.Metacognition.DeadMemoryWindow, - }, log) - - if err := metaAgent.Start(rootCtx, bus); err != nil { - log.Error("failed to start metacognition agent", "error", err) - } else { - log.Info("metacognition agent started", "interval", cfg.Metacognition.Interval) - } - } - - // --- Start dreaming agent --- - var dreamer *dreaming.DreamingAgent - if cfg.Dreaming.Enabled { - dreamer = dreaming.NewDreamingAgent(memStore, wrap("dreaming"), dreaming.DreamingConfig{ - Interval: cfg.Dreaming.Interval, - BatchSize: cfg.Dreaming.BatchSize, - SalienceThreshold: cfg.Dreaming.SalienceThreshold, - AssociationBoostFactor: cfg.Dreaming.AssociationBoostFactor, - NoisePruneThreshold: cfg.Dreaming.NoisePruneThreshold, - StartupDelay: time.Duration(cfg.Dreaming.StartupDelaySec) * time.Second, - DeadMemoryWindow: cfg.Dreaming.DeadMemoryWindow, - InsightsBudget: cfg.Dreaming.InsightsBudget, - DefaultConfidence: cfg.Dreaming.DefaultConfidence, - }, log) - - if err := dreamer.Start(rootCtx, bus); err != nil { - log.Error("failed to start dreaming agent", "error", err) - } else { - log.Info("dreaming agent started", "interval", cfg.Dreaming.Interval) - } - } - - // --- Start abstraction agent --- - var abstractionAgent *abstraction.AbstractionAgent - if cfg.Abstraction.Enabled { - abstractionAgent = abstraction.NewAbstractionAgent(memStore, wrap("abstraction"), abstraction.AbstractionConfig{ - Interval: cfg.Abstraction.Interval, - MinStrength: cfg.Abstraction.MinStrength, - MaxLLMCalls: cfg.Abstraction.MaxLLMCalls, - StartupDelay: time.Duration(cfg.Abstraction.StartupDelaySec) * time.Second, - DefaultConfidence: cfg.Abstraction.DefaultConfidence, - PatternAxiomConfidence: cfg.Abstraction.PatternAxiomConfidence, - ConfidenceModerateDecay: cfg.Abstraction.ConfidenceModerateDecay, - ConfidenceSignificantDecay: cfg.Abstraction.ConfidenceSignificantDecay, - ConfidenceSevereDecay: cfg.Abstraction.ConfidenceSevereDecay, - GroundingFloor: cfg.Abstraction.GroundingFloor, - }, log) - - if err := abstractionAgent.Start(rootCtx, bus); err != nil 
{ - log.Error("failed to start abstraction agent", "error", err) - } else { - log.Info("abstraction agent started", "interval", cfg.Abstraction.Interval) - } - } - - // --- Start orchestrator (autonomous health monitoring and self-testing) --- - var orch *orchestrator.Orchestrator - if cfg.Orchestrator.Enabled { - orch = orchestrator.NewOrchestrator(memStore, wrap("orchestrator"), orchestrator.OrchestratorConfig{ - AdaptiveIntervals: cfg.Orchestrator.AdaptiveIntervals, - MaxDBSizeMB: cfg.Orchestrator.MaxDBSizeMB, - SelfTestInterval: cfg.Orchestrator.SelfTestInterval, - AutoRecovery: cfg.Orchestrator.AutoRecovery, - HealthReportPath: filepath.Join(filepath.Dir(cfg.Store.DBPath), "health.json"), - MonitorInterval: cfg.Orchestrator.MonitorInterval, - HealthReportInterval: cfg.Orchestrator.HealthReportInterval, - }, log) - - if err := orch.Start(rootCtx, bus); err != nil { - log.Error("failed to start orchestrator", "error", err) - } else { - log.Info("orchestrator started", - "monitor_interval", cfg.Orchestrator.MonitorInterval, - "self_test_interval", cfg.Orchestrator.SelfTestInterval) - } - } - - // --- Start reactor engine (centralized autonomous behavior coordination) --- - { - reactorLog := log.With("component", "reactor") - reactorEngine := reactor.NewEngine(memStore, bus, reactorLog) - - // Parse reactor cooldown overrides from config - var cooldownOverrides map[string]time.Duration - if len(cfg.Reactor.Cooldowns) > 0 { - cooldownOverrides = make(map[string]time.Duration, len(cfg.Reactor.Cooldowns)) - for chainID, durStr := range cfg.Reactor.Cooldowns { - d, err := time.ParseDuration(durStr) - if err != nil { - log.Warn("invalid reactor cooldown duration, ignoring", "chain_id", chainID, "value", durStr, "error", err) - continue - } - cooldownOverrides[chainID] = d - } - } - - deps := reactor.ChainDeps{ - MaxDBSizeMB: cfg.Orchestrator.MaxDBSizeMB, - CooldownOverrides: cooldownOverrides, - Logger: reactorLog, - } - if consolidator != nil { - deps.ConsolidationTrigger = consolidator.GetTriggerChannel() - } - if abstractionAgent != nil { - deps.AbstractionTrigger = abstractionAgent.GetTriggerChannel() - } - if metaAgent != nil { - deps.MetacognitionTrigger = metaAgent.GetTriggerChannel() - } - if dreamer != nil { - deps.DreamingTrigger = dreamer.GetTriggerChannel() - } - if orch != nil { - deps.IncrementAutonomous = orch.IncrementAutonomousCount - } - deps.ForumAgentPosting = cfg.Forum.AgentPosting - deps.ForumMentionResponses = cfg.Forum.MentionResponses - deps.ForumMentionMaxTokens = cfg.Forum.MentionMaxTokens - deps.ForumMentionTemp = cfg.Forum.MentionTemp - deps.ForumPerAgentSubforums = cfg.Forum.PerAgentSubforums - deps.ForumDigestPosting = cfg.Forum.DigestPosting - deps.MentionLLM = llmProvider - if retriever != nil { - deps.MentionQuery = retriever - } - - for _, chain := range reactor.NewChainRegistry(deps) { - reactorEngine.RegisterChain(chain) - } - - if err := reactorEngine.Start(rootCtx, bus); err != nil { - log.Error("failed to start reactor engine", "error", err) - } - } - - // --- Sync project forum categories --- - if n, err := memStore.SyncProjectCategories(rootCtx); err != nil { - log.Warn("failed to sync project categories", "error", err) - } else if n > 0 { - log.Info("created forum categories for projects", "count", n) - } - - // --- Backfill episode-memory links (fixes encoding/episoding race condition) --- - go func() { - if n, err := memStore.BackfillEpisodeMemoryLinks(rootCtx); err != nil { - log.Warn("failed to backfill episode memory links", "error", err) - } 
else if n > 0 { - log.Info("backfilled episode-memory links", "linked", n) - } - }() - - // --- Start API server --- - if cfg.API.Port > 0 { - apiDeps := api.ServerDeps{ - Store: memStore, - LLM: llmProvider, - Bus: bus, - Retriever: retriever, - IngestExcludePatterns: cfg.Perception.Filesystem.ExcludePatterns, - IngestMaxContentBytes: cfg.Perception.Filesystem.MaxContentBytes, - Version: Version, - ConfigPath: configPath, - ServiceRestarter: daemon.NewServiceManager(), - PIDRestart: daemon.PIDRestart, - MCPToolCount: mcp.ToolCount(), - StartTime: time.Now(), - Log: log, - } - // Only set Consolidator if it's non-nil (avoids Go nil-interface trap) - if consolidator != nil { - apiDeps.Consolidator = consolidator - } - if cfg.AgentSDK.Enabled && cfg.AgentSDK.EvolutionDir != "" { - apiDeps.AgentEvolutionDir = cfg.AgentSDK.EvolutionDir - apiDeps.AgentWebPort = cfg.AgentSDK.WebPort - } - - // Set API routes memory defaults from config - routes.FeedbackStrengthDelta = cfg.MemoryDefaults.FeedbackStrengthDelta - routes.FeedbackSalienceBoost = cfg.MemoryDefaults.FeedbackSalienceBoost - routes.InitialSalienceForType = func(memType string) float32 { - return cfg.MemoryDefaults.SalienceForType(memType) - } - - apiServer := api.NewServer(api.ServerConfig{ - Host: cfg.API.Host, - Port: cfg.API.Port, - RequestTimeoutSec: cfg.API.RequestTimeoutSec, - Token: cfg.API.Token, - AllowedOrigins: cfg.API.AllowedOrigins, - }, apiDeps) - - if err := apiServer.Start(); err != nil { - log.Error("failed to start API server", "error", err) - } else { - log.Info("API server started", "addr", fmt.Sprintf("%s:%d", cfg.API.Host, cfg.API.Port)) - defer func() { - shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second) - defer shutdownCancel() - _ = apiServer.Stop(shutdownCtx) - }() - } - } - - // --- Start agent web server (Python WebSocket) --- - agentWebCmd, agentWebDone := startAgentWebServer(cfg, log) - - // Set up signal handling for graceful shutdown - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, shutdownSignals()...) - - // Block until signal received - sig := <-sigChan - log.Info("shutdown signal received", "signal", sig.String()) - - // Graceful shutdown: cancel root context to stop all agents - rootCancel() - - // Stop agent web server if running. Use agentWebDone (owned by the - // background goroutine) instead of calling cmd.Wait() a second time. - if agentWebCmd != nil && agentWebCmd.Process != nil { - log.Info("stopping agent web server", "pid", agentWebCmd.Process.Pid) - // On Unix, send SIGTERM for graceful shutdown. On Windows, SIGTERM - // is not supported — go straight to Kill(). 
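
Back in the API wiring above, the "Go nil-interface trap" comment is worth
unpacking: assigning a typed nil pointer to an interface-typed field produces an
interface value that does not compare equal to nil, so a downstream non-nil
check on deps.Consolidator would wrongly pass. A self-contained illustration
(interface and struct names here are hypothetical):

    package main

    import "fmt"

    // Consolidator stands in for the interface type of apiDeps.Consolidator.
    type Consolidator interface {
        RunOnce()
    }

    type agent struct{}

    func (a *agent) RunOnce() {}

    func main() {
        var a *agent // typed nil pointer

        var c Consolidator = a // the interface now holds the pair (*agent, nil)

        fmt.Println(a == nil) // true
        fmt.Println(c == nil) // false: the trap, hence the explicit guard above
    }
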
- if runtime.GOOS != "windows" { - if err := agentWebCmd.Process.Signal(syscall.SIGTERM); err != nil { - log.Warn("failed to send SIGTERM to agent web server", "error", err) - _ = agentWebCmd.Process.Kill() - } - } else { - _ = agentWebCmd.Process.Kill() - } - select { - case <-agentWebDone: - case <-time.After(5 * time.Second): - log.Warn("agent web server did not exit in 5s, killing") - _ = agentWebCmd.Process.Kill() - } - } - - // Give agents a moment to drain - time.Sleep(500 * time.Millisecond) - - if orch != nil { - _ = orch.Stop() - } - if abstractionAgent != nil { - _ = abstractionAgent.Stop() - } - if dreamer != nil { - _ = dreamer.Stop() - } - if metaAgent != nil { - _ = metaAgent.Stop() - } - if consolidator != nil { - _ = consolidator.Stop() - } - if encoder != nil { - _ = encoder.Stop() - } - if episodingAgent != nil { - _ = episodingAgent.Stop() - } - if percAgent != nil { - _ = percAgent.Stop() - } - - if err := bus.Close(); err != nil { - log.Error("error closing event bus", "error", err) - } - - if err := memStore.Close(); err != nil { - log.Error("error closing store", "error", err) - } - - log.Info("mnemonic daemon shutdown complete") -} - -// ============================================================================ -// CLI Commands (remember / recall / consolidate) -// ============================================================================ - -// buildRetrievalConfig maps the central config to the retrieval agent's config struct. -func buildRetrievalConfig(cfg *config.Config) retrieval.RetrievalConfig { - return retrieval.RetrievalConfig{ - MaxHops: cfg.Retrieval.MaxHops, - ActivationThreshold: float32(cfg.Retrieval.ActivationThreshold), - DecayFactor: float32(cfg.Retrieval.DecayFactor), - MaxResults: cfg.Retrieval.MaxResults, - MaxToolCalls: cfg.Retrieval.MaxToolCalls, - SynthesisMaxTokens: cfg.Retrieval.SynthesisMaxTokens, - MergeAlpha: float32(cfg.Retrieval.MergeAlpha), - DualHitBonus: float32(cfg.Retrieval.DualHitBonus), - - FTSCandidateLimit: cfg.Retrieval.FTSCandidateLimit, - EmbeddingCandidateLimit: cfg.Retrieval.EmbeddingCandidateLimit, - PatternSearchLimit: cfg.Retrieval.PatternSearchLimit, - AbstractionSearchLimit: cfg.Retrieval.AbstractionSearchLimit, - - FTSRankWeight: float32(cfg.Retrieval.FTSRankWeight), - FTSSalienceWeight: float32(cfg.Retrieval.FTSSalienceWeight), - DefaultSalience: float32(cfg.Retrieval.DefaultSalience), - - TimeRangeBaseScore: float32(cfg.Retrieval.TimeRangeBaseScore), - TimeRangeSalienceWt: float32(cfg.Retrieval.TimeRangeSalienceWt), - - RecencyBoostWeight: float32(cfg.Retrieval.RecencyBoostWeight), - RecencyHalfLifeDays: float32(cfg.Retrieval.RecencyHalfLifeDays), - ActivityBonusMax: float32(cfg.Retrieval.ActivityBonusMax), - ActivityBonusScale: float32(cfg.Retrieval.ActivityBonusScale), - - CriticalBoost: float32(cfg.Retrieval.CriticalBoost), - ImportantBoost: float32(cfg.Retrieval.ImportantBoost), - - DiversityLambda: float32(cfg.Retrieval.DiversityLambda), - DiversityThreshold: float32(cfg.Retrieval.DiversityThreshold), - - FeedbackWeight: float32(cfg.Retrieval.FeedbackWeight), - SourceWeights: convertSourceWeights(cfg.Retrieval.SourceWeights), - TypeWeights: convertSourceWeights(cfg.Retrieval.TypeWeights), - - ContextBoostWindowMin: cfg.Perception.RecallBoostWindowMin, - ContextBoostMax: float32(cfg.Perception.RecallBoostMax), - ContextBoostSources: convertContextBoostSources(cfg.Retrieval.ContextBoostSources), - } -} - -// convertContextBoostSources converts []string to map[string]bool. 
-func convertContextBoostSources(src []string) map[string]bool { - if src == nil { - return nil - } - out := make(map[string]bool, len(src)) - for _, s := range src { - out[s] = true - } - return out -} - -// convertSourceWeights converts map[string]float64 to map[string]float32. -func convertSourceWeights(src map[string]float64) map[string]float32 { - if src == nil { - return nil - } - out := make(map[string]float32, len(src)) - for k, v := range src { - out[k] = float32(v) - } - return out -} - -// initRuntime loads config, opens store and LLM for CLI commands. -// The returned Provider includes training data capture if enabled in config. -func initRuntime(configPath string) (*config.Config, *sqlite.SQLiteStore, llm.Provider, *slog.Logger) { - cfg, err := config.Load(configPath) - if err != nil { - die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") - } - - log, err := logger.New(logger.Config{Level: "warn", Format: "text"}) - if err != nil { - die(exitGeneral, fmt.Sprintf("initializing logger: %v", err), "") - } - - _ = cfg.EnsureDataDir() - - db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) - if err != nil { - die(exitDatabase, fmt.Sprintf("opening database: %v", err), "mnemonic diagnose") - } - - provider := newLLMProvider(cfg) - - // Wrap with training data capture if enabled - if cfg.Training.CaptureEnabled && cfg.Training.CaptureDir != "" { - provider = llm.NewTrainingCaptureProvider(provider, "cli", cfg.Training.CaptureDir) - } - - return cfg, db, provider, log -} - -// toConsolidationConfig converts the global config's consolidation settings to the agent's config. -func toConsolidationConfig(cfg *config.Config) consolidation.ConsolidationConfig { - return consolidation.ConsolidationConfig{ - Interval: cfg.Consolidation.Interval, - DecayRate: cfg.Consolidation.DecayRate, - FadeThreshold: cfg.Consolidation.FadeThreshold, - ArchiveThreshold: cfg.Consolidation.ArchiveThreshold, - RetentionWindow: cfg.Consolidation.RetentionWindow, - MaxMemoriesPerCycle: cfg.Consolidation.MaxMemoriesPerCycle, - MaxMergesPerCycle: cfg.Consolidation.MaxMergesPerCycle, - MinClusterSize: cfg.Consolidation.MinClusterSize, - AssocPruneThreshold: consolidation.DefaultConfig().AssocPruneThreshold, - RecencyProtection24h: cfg.Consolidation.RecencyProtection24h, - RecencyProtection168h: cfg.Consolidation.RecencyProtection168h, - AccessResistanceCap: cfg.Consolidation.AccessResistanceCap, - AccessResistanceScale: cfg.Consolidation.AccessResistanceScale, - MergeSimilarityThreshold: cfg.Consolidation.MergeSimilarityThreshold, - PatternMatchThreshold: cfg.Consolidation.PatternMatchThreshold, - PatternStrengthIncrement: float32(cfg.Consolidation.PatternStrengthIncrement), - PatternIncrementCap: float32(cfg.Consolidation.PatternIncrementCap), - LargeClusterBonus: float32(cfg.Consolidation.LargeClusterBonus), - LargeClusterMinSize: cfg.Consolidation.LargeClusterMinSize, - PatternStrengthCeiling: float32(cfg.Consolidation.PatternStrengthCeiling), - StrongEvidenceCeiling: float32(cfg.Consolidation.StrongEvidenceCeiling), - StrongEvidenceMinCount: cfg.Consolidation.StrongEvidenceMinCount, - PatternBaselineDecay: float32(cfg.Consolidation.PatternBaselineDecay), - StaleDecayHealthy: float32(cfg.Consolidation.StaleDecayHealthy), - StaleDecayModerate: float32(cfg.Consolidation.StaleDecayModerate), - StaleDecayAggressive: float32(cfg.Consolidation.StaleDecayAggressive), - SelfSustainingMinEvidence: cfg.Consolidation.SelfSustainingMinEvidence, - SelfSustainingMinStrength: 
float32(cfg.Consolidation.SelfSustainingMinStrength), - SelfSustainingDecay: float32(cfg.Consolidation.SelfSustainingDecay), - NeverRecalledArchiveDays: cfg.Consolidation.NeverRecalledArchiveDays, - StartupDelay: time.Duration(cfg.Consolidation.StartupDelaySec) * time.Second, - } -} - -// rememberCommand stores text in the memory system. -// If the daemon is running, it writes the raw memory to the DB and notifies the -// daemon via API so the daemon's own encoding agent picks it up (no duplicate encoder). -// If the daemon is NOT running, it spins up a local encoder and waits for it to finish. -func rememberCommand(configPath, text string) { - const maxRememberBytes = 10240 // 10KB - if len(text) > maxRememberBytes { - fmt.Fprintf(os.Stderr, "Error: input too large (%d bytes, max %d). Pipe large content through 'mnemonic ingest' instead.\n", len(text), maxRememberBytes) - os.Exit(1) - } - - cfg, db, llmProvider, log := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - // Write raw memory - raw := store.RawMemory{ - ID: uuid.New().String(), - Timestamp: time.Now(), - Source: "user", - Type: "explicit", - Content: text, - InitialSalience: 0.7, - CreatedAt: time.Now(), - } - if err := db.WriteRaw(ctx, raw); err != nil { - fmt.Fprintf(os.Stderr, "Error writing raw memory: %v\n", err) - os.Exit(1) - } - - // If daemon is running, just write raw and let the daemon's encoder handle it. - // The daemon's encoding agent polls for unprocessed raw memories every 5s. - if running, _ := daemon.IsRunning(); running { - fmt.Printf("Remembered: %s\n", text) - fmt.Printf(" (daemon is running — encoding will happen automatically)\n") - return - } - - // Daemon not running — spin up a local encoder with a generous timeout - fmt.Printf("Encoding locally (daemon not running)...\n") - - timeoutSec := cfg.LLM.TimeoutSec - if timeoutSec < 60 { - timeoutSec = 60 - } - encodeCtx, encodeCancel := context.WithTimeout(ctx, time.Duration(timeoutSec)*time.Second) - defer encodeCancel() - - bus := events.NewInMemoryBus(100) - defer func() { _ = bus.Close() }() - - encoder := encoding.NewEncodingAgentWithConfig(db, llmProvider, log, buildEncodingConfig(cfg)) - if err := encoder.Start(encodeCtx, bus); err != nil { - fmt.Fprintf(os.Stderr, "Error starting encoder: %v\n", err) - os.Exit(1) - } - - // Publish event to trigger encoding - _ = bus.Publish(encodeCtx, events.RawMemoryCreated{ - ID: raw.ID, - Source: raw.Source, - Salience: raw.InitialSalience, - Ts: raw.Timestamp, - }) - - // Poll until the raw memory is marked processed or we time out - deadline := time.After(time.Duration(timeoutSec) * time.Second) - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - encoded := false - for !encoded { - select { - case <-deadline: - fmt.Fprintf(os.Stderr, "Warning: encoding timed out after %ds\n", timeoutSec) - encoded = true - case <-ticker.C: - r, err := db.GetRaw(ctx, raw.ID) - if err == nil && r.Processed { - encoded = true - } - } - } - - _ = encoder.Stop() - fmt.Printf("Remembered: %s\n", text) -} - -// recallCommand retrieves memories matching a query. 
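
A nit on rememberCommand's doc comment: it says the daemon is notified via API,
but the body shown here relies purely on the daemon's encoding agent polling
every 5s after the raw write. In the daemon-down path the command then waits by
polling too: a one-second ticker checks the store until the raw memory is marked
processed or the deadline fires. That loop, distilled into a reusable helper
(name and condition are hypothetical):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // waitUntil polls cond every interval until it returns true or ctx expires.
    func waitUntil(ctx context.Context, interval time.Duration, cond func() bool) bool {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-ctx.Done():
                return false // timed out or cancelled
            case <-ticker.C:
                if cond() {
                    return true
                }
            }
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
        defer cancel()

        start := time.Now()
        ok := waitUntil(ctx, 500*time.Millisecond, func() bool {
            return time.Since(start) > time.Second // stand-in for "raw memory processed"
        })
        fmt.Println("processed:", ok)
    }
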
-func recallCommand(configPath, query string) { - cfg, db, llmProvider, log := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - retriever := retrieval.NewRetrievalAgent(db, llmProvider, buildRetrievalConfig(cfg), log, nil) - - resp, err := retriever.Query(ctx, retrieval.QueryRequest{ - Query: query, - Synthesize: true, - }) - if err != nil { - fmt.Fprintf(os.Stderr, "Error recalling: %v\n", err) - os.Exit(1) - } - - if len(resp.Memories) == 0 { - fmt.Println("No memories found.") - return - } - - fmt.Printf("Found %d memories (took %dms):\n\n", len(resp.Memories), resp.TookMs) - for i, result := range resp.Memories { - fmt.Printf(" %d. [%.2f] %s\n", i+1, result.Score, result.Memory.Summary) - if result.Memory.Content != "" && result.Memory.Content != result.Memory.Summary { - fmt.Printf(" %s\n", result.Memory.Content) - } - fmt.Println() - } - - if resp.Synthesis != "" { - fmt.Printf("Synthesis:\n %s\n", resp.Synthesis) - } -} - -// consolidateCommand runs a single memory consolidation cycle. -func consolidateCommand(configPath string) { - cfg, db, llmProvider, log := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - bus := events.NewInMemoryBus(100) - defer func() { _ = bus.Close() }() - - consolidator := consolidation.NewConsolidationAgent(db, llmProvider, toConsolidationConfig(cfg), log) - - fmt.Println("Running consolidation cycle...") - - report, err := consolidator.RunOnce(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "Consolidation failed: %v\n", err) - os.Exit(1) - } - - // Publish events for dashboard - _ = bus.Publish(ctx, events.ConsolidationCompleted{ - DurationMs: report.Duration.Milliseconds(), - MemoriesProcessed: report.MemoriesProcessed, - MemoriesDecayed: report.MemoriesDecayed, - MergedClusters: report.MergesPerformed, - AssociationsPruned: report.AssociationsPruned, - Ts: time.Now(), - }) - - fmt.Printf("Consolidation complete (%dms):\n", report.Duration.Milliseconds()) - fmt.Printf(" Memories processed: %d\n", report.MemoriesProcessed) - fmt.Printf(" Salience decayed: %d\n", report.MemoriesDecayed) - fmt.Printf(" Transitioned fading: %d\n", report.TransitionedFading) - fmt.Printf(" Transitioned archived: %d\n", report.TransitionedArchived) - fmt.Printf(" Associations pruned: %d\n", report.AssociationsPruned) - fmt.Printf(" Merges performed: %d\n", report.MergesPerformed) - fmt.Printf(" Expired deleted: %d\n", report.ExpiredDeleted) -} - -// ============================================================================ -// Export / Import / Backup Commands -// ============================================================================ - -// exportCommand exports the memory store to a file. 
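
exportCommand, next, reads its options with a manual index-advancing scan over
the args slice rather than the flag package, keeping each subcommand's parsing
local and order-tolerant. The same pattern in isolation (flag names are the ones
used by these commands; the helper itself is hypothetical):

    package main

    import "fmt"

    // parsePairs scans args for "--key value" pairs, advancing past each
    // consumed value, mirroring the loops in exportCommand and importCommand.
    func parsePairs(args []string) map[string]string {
        out := map[string]string{}
        for i := 0; i < len(args); i++ {
            switch args[i] {
            case "--format", "--output", "--mode":
                if i+1 < len(args) {
                    out[args[i]] = args[i+1]
                    i++ // skip the value we just consumed
                }
            }
        }
        return out
    }

    func main() {
        got := parsePairs([]string{"export", "--format", "sqlite", "--output", "/tmp/x.db"})
        fmt.Println(got) // map[--format:sqlite --output:/tmp/x.db]
    }
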
-func exportCommand(configPath string, args []string) { - cfg, db, _, _ := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - // Parse flags - format := "json" - outputPath := "" - for i := 1; i < len(args); i++ { - switch args[i] { - case "--format": - if i+1 < len(args) { - format = args[i+1] - i++ - } - case "--output": - if i+1 < len(args) { - outputPath = args[i+1] - i++ - } - } - } - - // Default output path - if outputPath == "" { - backupDir, err := backup.EnsureBackupDir() - if err != nil { - fmt.Fprintf(os.Stderr, "Error creating backup directory: %v\n", err) - os.Exit(1) - } - timestamp := time.Now().Format("2006-01-02_150405") - outputPath = filepath.Join(backupDir, fmt.Sprintf("export_%s.%s", timestamp, format)) - } - - switch format { - case "json": - fmt.Printf("Exporting to JSON: %s\n", outputPath) - if err := backup.ExportJSON(ctx, db, outputPath); err != nil { - fmt.Fprintf(os.Stderr, "Export failed: %v\n", err) - os.Exit(1) - } - case "sqlite": - fmt.Printf("Exporting SQLite copy: %s\n", outputPath) - if err := backup.ExportSQLite(ctx, cfg.Store.DBPath, outputPath); err != nil { - fmt.Fprintf(os.Stderr, "Export failed: %v\n", err) - os.Exit(1) - } - default: - fmt.Fprintf(os.Stderr, "Unknown format: %s (supported: json, sqlite)\n", format) - os.Exit(1) - } - - // Get file size - if info, err := os.Stat(outputPath); err == nil { - fmt.Printf("%sExport complete.%s (%.1f KB)\n", colorGreen, colorReset, float64(info.Size())/1024) - } else { - fmt.Printf("%sExport complete.%s\n", colorGreen, colorReset) - } -} - -// importCommand imports memories from a JSON export file. -func importCommand(configPath, filePath string, args []string) { - _, db, _, _ := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - // Parse mode - mode := backup.ModeMerge - for i := 2; i < len(args); i++ { - if args[i] == "--mode" && i+1 < len(args) { - switch args[i+1] { - case "merge": - mode = backup.ModeMerge - case "replace": - mode = backup.ModeReplace - default: - fmt.Fprintf(os.Stderr, "Unknown mode: %s (supported: merge, replace)\n", args[i+1]) - os.Exit(1) - } - i++ - } - } - - fmt.Printf("Importing from %s (mode: %s)...\n", filePath, mode) - - result, err := backup.ImportFromJSON(ctx, db, filePath, mode) - if err != nil { - fmt.Fprintf(os.Stderr, "Import failed: %v\n", err) - os.Exit(1) - } - - fmt.Printf("%sImport complete%s (%dms):\n", colorGreen, colorReset, result.Duration.Milliseconds()) - fmt.Printf(" Memories imported: %d\n", result.MemoriesImported) - fmt.Printf(" Associations imported: %d\n", result.AssociationsImported) - fmt.Printf(" Raw memories imported: %d\n", result.RawMemoriesImported) - fmt.Printf(" Skipped duplicates: %d\n", result.SkippedDuplicates) - if len(result.Errors) > 0 { - fmt.Printf(" %sWarnings:%s %d\n", colorYellow, colorReset, len(result.Errors)) - } -} - -// backupCommand creates a timestamped backup with retention. 
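
backupCommand, below, delegates to backup.BackupWithRetention(ctx, db,
backupDir, 5), whose implementation is outside this patch. A plausible
keep-newest-N retention sketch, sorting by modification time (a hypothetical
helper, assuming Go 1.21+ for the min builtin; the real logic may differ):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "sort"
    )

    // pruneOldest deletes all but the newest keep files in dir, by mod time.
    func pruneOldest(dir string, keep int) error {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return err
        }
        type file struct {
            name string
            mod  int64
        }
        var files []file
        for _, e := range entries {
            if e.IsDir() {
                continue
            }
            info, err := e.Info()
            if err != nil {
                continue
            }
            files = append(files, file{e.Name(), info.ModTime().UnixNano()})
        }
        // Newest first; everything past the first keep entries is removed.
        sort.Slice(files, func(i, j int) bool { return files[i].mod > files[j].mod })
        for _, old := range files[min(keep, len(files)):] {
            if err := os.Remove(filepath.Join(dir, old.name)); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        fmt.Println(pruneOldest("/tmp/mnemonic-backups", 5)) // path is illustrative
    }
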
-func backupCommand(configPath string) { - _, db, _, _ := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - backupDir, err := backup.EnsureBackupDir() - if err != nil { - fmt.Fprintf(os.Stderr, "Error creating backup directory: %v\n", err) - os.Exit(1) - } - - fmt.Printf("Backing up to %s...\n", backupDir) - - backupPath, err := backup.BackupWithRetention(ctx, db, backupDir, 5) - if err != nil { - fmt.Fprintf(os.Stderr, "Backup failed: %v\n", err) - os.Exit(1) - } - - if info, err := os.Stat(backupPath); err == nil { - fmt.Printf("%sBackup complete.%s %s (%.1f KB)\n", colorGreen, colorReset, filepath.Base(backupPath), float64(info.Size())/1024) - } else { - fmt.Printf("%sBackup complete.%s %s\n", colorGreen, colorReset, filepath.Base(backupPath)) - } -} - -// ============================================================================ -// Restore Command (disaster recovery) -// ============================================================================ - -// restoreCommand restores the database from a SQLite backup file. -func restoreCommand(configPath string, backupPath string) { - cfg, err := config.Load(configPath) - if err != nil { - die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") - } - - // Verify backup file exists - info, err := os.Stat(backupPath) - if err != nil { - die(exitUsage, fmt.Sprintf("backup file not found: %s", backupPath), "check the file path") - } - if info.IsDir() { - die(exitUsage, fmt.Sprintf("%s is a directory, not a backup file", backupPath), "provide a .db file path") - } - - // Verify backup integrity by opening it as a SQLite database - fmt.Printf("Verifying backup integrity: %s\n", backupPath) - testStore, err := sqlite.NewSQLiteStore(backupPath, 5000) - if err != nil { - die(exitDatabase, fmt.Sprintf("backup is not a valid SQLite database: %v", err), "") - } - intCtx, intCancel := context.WithTimeout(context.Background(), 30*time.Second) - intErr := testStore.CheckIntegrity(intCtx) - intCancel() - _ = testStore.Close() - if intErr != nil { - die(exitDatabase, fmt.Sprintf("backup file is corrupted: %v", intErr), "") - } - fmt.Printf(" %s✓ Backup integrity verified%s\n", colorGreen, colorReset) - - // Check if daemon is running - svc := daemon.NewServiceManager() - if running, _ := svc.IsRunning(); running { - die(exitGeneral, "daemon is running", "mnemonic stop") - } - - // If current DB exists, move it aside - dbPath := cfg.Store.DBPath - if _, statErr := os.Stat(dbPath); statErr == nil { - aside := dbPath + ".pre-restore" - fmt.Printf(" Moving current database to %s\n", aside) - if err := os.Rename(dbPath, aside); err != nil { - die(exitPermission, fmt.Sprintf("moving current database: %v", err), "check file permissions") - } - } - - // Copy backup to DB path - _ = cfg.EnsureDataDir() - if err := backup.ExportSQLite(context.Background(), backupPath, dbPath); err != nil { - fmt.Fprintf(os.Stderr, "Error copying backup to database path: %v\n", err) - os.Exit(1) - } - - fmt.Printf("\n%s✓ Database restored from %s%s\n", colorGreen, filepath.Base(backupPath), colorReset) - fmt.Printf(" Database: %s (%.1f KB)\n", dbPath, float64(info.Size())/1024) - fmt.Printf(" Start the daemon with 'mnemonic start' or 'mnemonic serve'.\n") -} - -// ============================================================================ -// Purge Command (reset database) -// ============================================================================ - -// purgeCommand stops the daemon, deletes the database and log, 
and starts fresh. -func purgeCommand(configPath string) { - cfg, err := config.Load(configPath) - if err != nil { - die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") - } - - // Confirm with user - fmt.Printf("%sThis will permanently delete all memories and reset the database.%s\n", colorRed, colorReset) - fmt.Printf(" Database: %s\n", cfg.Store.DBPath) - fmt.Printf("\nType 'yes' to confirm: ") - - var confirmation string - _, _ = fmt.Scanln(&confirmation) - if confirmation != "yes" { - fmt.Println("Aborted.") - return - } - - // Stop daemon if running - if running, pid := daemon.IsRunning(); running { - fmt.Printf("Stopping daemon (PID %d)...\n", pid) - if err := daemon.Stop(); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to stop daemon: %v\n", err) - fmt.Fprintf(os.Stderr, "Please stop it manually and try again.\n") - os.Exit(1) - } - time.Sleep(1 * time.Second) - } - - // Resolve DB path (handle ~ expansion) - dbPath := cfg.Store.DBPath - if strings.HasPrefix(dbPath, "~") { - home, _ := os.UserHomeDir() - dbPath = filepath.Join(home, dbPath[1:]) - } - - // Delete database file and WAL/SHM files - deleted := 0 - for _, suffix := range []string{"", "-wal", "-shm"} { - path := dbPath + suffix - if _, err := os.Stat(path); err == nil { - if err := os.Remove(path); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to delete %s: %v\n", path, err) - } else { - deleted++ - } - } - } - - if deleted > 0 { - fmt.Printf("%sDatabase purged.%s Deleted %d file(s).\n", colorGreen, colorReset, deleted) - } else { - fmt.Printf("No database files found at %s (already clean).\n", dbPath) - } - - fmt.Println("\nThe database will be recreated automatically on next start.") - fmt.Printf(" mnemonic start\n") -} - -// ============================================================================ -// Cleanup Command (selective noise removal) -// ============================================================================ - -// cleanupCommand scans raw_memories for paths matching exclude patterns and -// bulk-marks them as processed, then archives any encoded memories derived from them. 
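
Two details of purgeCommand above generalize well. Go does not expand a leading
tilde in paths, so values straight from config.yaml need the explicit rewrite
shown. And in WAL mode SQLite keeps a write-ahead log (-wal) and a shared-memory
index (-shm) beside the main file, so deleting only the .db would leave stale
sidecars behind. Both as a sketch (helper names are hypothetical, the path
illustrative):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    // expandHome rewrites a leading "~" to the user's home directory.
    func expandHome(p string) string {
        if strings.HasPrefix(p, "~") {
            if home, err := os.UserHomeDir(); err == nil {
                return filepath.Join(home, p[1:])
            }
        }
        return p
    }

    // removeSQLiteFiles deletes the database and its WAL/SHM sidecars together.
    func removeSQLiteFiles(dbPath string) (deleted int) {
        for _, suffix := range []string{"", "-wal", "-shm"} {
            if err := os.Remove(dbPath + suffix); err == nil {
                deleted++
            }
        }
        return deleted
    }

    func main() {
        fmt.Println(removeSQLiteFiles(expandHome("~/.mnemonic/memories.db")))
    }
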
-func cleanupCommand(configPath string, args []string) { - cfg, db, _, _ := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - patterns := cfg.Perception.Filesystem.ExcludePatterns - if len(patterns) == 0 { - fmt.Println("No exclude patterns configured in config.yaml — nothing to clean.") - return - } - - // Check for flags - autoConfirm := false - cleanPatterns := false - for _, a := range args { - if a == "--yes" || a == "-y" { - autoConfirm = true - } - if a == "--patterns" { - cleanPatterns = true - } - } - - // Count what would be cleaned - rawCount, err := db.CountRawUnprocessedByPathPatterns(ctx, patterns) - if err != nil { - fmt.Fprintf(os.Stderr, "Error counting raw memories: %v\n", err) - os.Exit(1) - } - - fmt.Printf("%sCleanup Summary%s\n", colorBold, colorReset) - fmt.Printf(" Exclude patterns: %d (from config.yaml)\n", len(patterns)) - fmt.Printf(" Unprocessed raw events: %s%d%s matching exclude patterns\n", colorYellow, rawCount, colorReset) - if cleanPatterns { - fmt.Printf(" --patterns flag: will archive all active patterns and abstractions\n") - } - - if rawCount == 0 && !cleanPatterns { - fmt.Println("\nNothing to clean up.") - return - } - - if !autoConfirm { - fmt.Printf("\nThis will mark matching raw events as processed and archive derived memories.\n") - if cleanPatterns { - fmt.Printf("It will also archive ALL active patterns and abstractions (they regenerate from clean data).\n") - } - fmt.Printf("Type 'yes' to confirm: ") - var confirmation string - _, _ = fmt.Scanln(&confirmation) - if confirmation != "yes" { - fmt.Println("Aborted.") - return - } - } - - rawCleaned := 0 - memArchived := 0 - - if rawCount > 0 { - // Mark raw events as processed - rawCleaned, err = db.BulkMarkRawProcessedByPathPatterns(ctx, patterns) - if err != nil { - fmt.Fprintf(os.Stderr, "Error cleaning raw memories: %v\n", err) - os.Exit(1) - } - - // Archive derived encoded memories - memArchived, err = db.ArchiveMemoriesByRawPathPatterns(ctx, patterns) - if err != nil { - fmt.Fprintf(os.Stderr, "Error archiving memories: %v\n", err) - os.Exit(1) - } - } - - patternsArchived := 0 - abstractionsArchived := 0 - if cleanPatterns { - patternsArchived, err = db.ArchiveAllPatterns(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "Error archiving patterns: %v\n", err) - os.Exit(1) - } - abstractionsArchived, err = db.ArchiveAllAbstractions(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "Error archiving abstractions: %v\n", err) - os.Exit(1) - } - } - - fmt.Printf("\n%sCleanup complete%s\n", colorGreen, colorReset) - fmt.Printf(" Raw events marked processed: %d\n", rawCleaned) - fmt.Printf(" Encoded memories archived: %d\n", memArchived) - if cleanPatterns { - fmt.Printf(" Patterns archived: %d\n", patternsArchived) - fmt.Printf(" Abstractions archived: %d\n", abstractionsArchived) - } -} - -// ============================================================================ -// Insights Command (metacognition) -// ============================================================================ - -// insightsCommand displays recent metacognition observations. -func insightsCommand(configPath string) { - _, db, _, _ := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - observations, err := db.ListMetaObservations(ctx, "", 20) - if err != nil { - fmt.Fprintf(os.Stderr, "Error fetching insights: %v\n", err) - os.Exit(1) - } - - if len(observations) == 0 { - fmt.Println("No insights available yet. 
The metacognition agent runs periodically to analyze memory health.") - fmt.Println("Run manually with: mnemonic meta-cycle") - return - } - - fmt.Printf("%sMnemonic Insights%s\n\n", colorBold, colorReset) - - for _, obs := range observations { - // Severity color - severityColor := colorGray - switch obs.Severity { - case "warning": - severityColor = colorYellow - case "critical": - severityColor = colorRed - case "info": - severityColor = colorCyan - } - - // Format observation type - typeLabel := strings.ReplaceAll(obs.ObservationType, "_", " ") - typeLabel = strings.ToUpper(typeLabel[:1]) + typeLabel[1:] - - ago := time.Since(obs.CreatedAt).Round(time.Minute) - timeStr := formatDuration(ago) - if timeStr != "just now" { - timeStr += " ago" - } - fmt.Printf(" %s[%s]%s %s%s%s (%s)\n", - severityColor, strings.ToUpper(obs.Severity), colorReset, - colorBold, typeLabel, colorReset, - timeStr) - - // Print details - for key, val := range obs.Details { - keyLabel := strings.ReplaceAll(key, "_", " ") - fmt.Printf(" %s: %s\n", keyLabel, formatDetailValue(val)) - } - fmt.Println() - } -} - -// formatDetailValue renders a detail value in a human-friendly way. -func formatDetailValue(val interface{}) string { - switch v := val.(type) { - case float64: - if v == float64(int64(v)) { - return fmt.Sprintf("%d", int64(v)) - } - return fmt.Sprintf("%.1f%%", v*100) - case map[string]interface{}: - parts := []string{} - for k, mv := range v { - switch n := mv.(type) { - case float64: - parts = append(parts, fmt.Sprintf("%s=%d", k, int64(n))) - default: - parts = append(parts, fmt.Sprintf("%s=%v", k, mv)) - } - } - return strings.Join(parts, ", ") - default: - return fmt.Sprintf("%v", val) - } -} - -// metaCycleCommand runs a single metacognition cycle and displays results. -func metaCycleCommand(configPath string) { - cfg, db, llmProvider, log := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - bus := events.NewInMemoryBus(100) - defer func() { _ = bus.Close() }() - - agent := metacognition.NewMetacognitionAgent(db, llmProvider, metacognition.MetacognitionConfig{ - Interval: 24 * time.Hour, // doesn't matter for RunOnce - ReflectionLookback: cfg.Metacognition.ReflectionLookback, - DeadMemoryWindow: cfg.Metacognition.DeadMemoryWindow, - }, log) - - fmt.Println("Running metacognition cycle...") - - report, err := agent.RunOnce(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "Metacognition cycle failed: %v\n", err) - os.Exit(1) - } - - fmt.Printf("%sMetacognition complete%s (%dms):\n", colorGreen, colorReset, report.Duration.Milliseconds()) - - if len(report.Observations) == 0 { - fmt.Println(" No issues found — memory health looks good.") - return - } - - fmt.Printf(" %d observation(s):\n\n", len(report.Observations)) - for _, obs := range report.Observations { - severityColor := colorGray - switch obs.Severity { - case "warning": - severityColor = colorYellow - case "critical": - severityColor = colorRed - case "info": - severityColor = colorCyan - } - - typeLabel := strings.ReplaceAll(obs.ObservationType, "_", " ") - typeLabel = strings.ToUpper(typeLabel[:1]) + typeLabel[1:] - - fmt.Printf(" %s[%s]%s %s\n", severityColor, strings.ToUpper(obs.Severity), colorReset, typeLabel) - for key, val := range obs.Details { - keyLabel := strings.ReplaceAll(key, "_", " ") - fmt.Printf(" %s: %v\n", keyLabel, val) - } - fmt.Println() - } -} - -// dreamCycleCommand runs a single dream cycle and displays results. 
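
formatDetailValue above leans on a property of encoding/json: any JSON number
unmarshalled into an interface{} arrives as a float64, so the
v == float64(int64(v)) test is what separates counts from ratios (ratios are
then printed as percentages). Demonstrated end to end:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // render mirrors the float64 branch of formatDetailValue.
    func render(v interface{}) string {
        switch n := v.(type) {
        case float64:
            if n == float64(int64(n)) {
                return fmt.Sprintf("%d", int64(n)) // integral: treat as a count
            }
            return fmt.Sprintf("%.1f%%", n*100) // fractional: treat as a ratio
        default:
            return fmt.Sprintf("%v", v)
        }
    }

    func main() {
        var details map[string]interface{}
        _ = json.Unmarshal([]byte(`{"dead_memories": 42, "recall_rate": 0.375}`), &details)

        // Both values are float64 after Unmarshal, even the integer-looking one.
        fmt.Println(render(details["dead_memories"])) // 42
        fmt.Println(render(details["recall_rate"]))   // 37.5%
    }
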
-func dreamCycleCommand(configPath string) { - cfg, db, llmProvider, log := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - bus := events.NewInMemoryBus(100) - defer func() { _ = bus.Close() }() - - agent := dreaming.NewDreamingAgent(db, llmProvider, dreaming.DreamingConfig{ - Interval: 3 * time.Hour, // doesn't matter for RunOnce - BatchSize: cfg.Dreaming.BatchSize, - SalienceThreshold: cfg.Dreaming.SalienceThreshold, - AssociationBoostFactor: cfg.Dreaming.AssociationBoostFactor, - NoisePruneThreshold: cfg.Dreaming.NoisePruneThreshold, - DeadMemoryWindow: cfg.Dreaming.DeadMemoryWindow, - InsightsBudget: cfg.Dreaming.InsightsBudget, - DefaultConfidence: cfg.Dreaming.DefaultConfidence, - }, log) - - fmt.Println("Running dream cycle (memory replay)...") - - report, err := agent.RunOnce(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "Dream cycle failed: %v\n", err) - os.Exit(1) - } - - fmt.Printf("%sDream cycle complete%s (%dms):\n", colorGreen, colorReset, report.Duration.Milliseconds()) - fmt.Printf(" Memories replayed: %d\n", report.MemoriesReplayed) - fmt.Printf(" Associations strengthened: %d\n", report.AssociationsStrengthened) - fmt.Printf(" New associations created: %d\n", report.NewAssociationsCreated) - fmt.Printf(" Noisy memories demoted: %d\n", report.NoisyMemoriesDemoted) -} - -// mcpCommand runs the MCP server on stdin/stdout for AI agent integration. -func mcpCommand(configPath string) { - cfg, db, llmProvider, log := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - bus := events.NewInMemoryBus(100) - defer func() { _ = bus.Close() }() - - // Create encoding agent so remembered memories get encoded. - // Polling is disabled in MCP mode — each MCP process only encodes via events - // for memories it creates. The daemon is the sole poller. This prevents N - // MCP processes from independently encoding the same unprocessed raw memories. - mcpEncodingCfg := buildEncodingConfig(cfg) - mcpEncodingCfg.DisablePolling = true - encoder := encoding.NewEncodingAgentWithConfig(db, llmProvider, log, mcpEncodingCfg) - if err := encoder.Start(ctx, bus); err != nil { - log.Error("failed to start encoding agent for MCP", "error", err) - } - defer func() { _ = encoder.Stop() }() - - // Create retrieval agent for recall - retriever := retrieval.NewRetrievalAgent(db, llmProvider, buildRetrievalConfig(cfg), log, bus) - - mcpResolver := config.NewProjectResolver(cfg.Projects) - daemonURL := fmt.Sprintf("http://%s:%d", cfg.API.Host, cfg.API.Port) - memDefaults := mcp.MemoryDefaults{ - SalienceGeneral: cfg.MemoryDefaults.InitialSalienceGeneral, - SalienceDecision: cfg.MemoryDefaults.InitialSalienceDecision, - SalienceError: cfg.MemoryDefaults.InitialSalienceError, - SalienceInsight: cfg.MemoryDefaults.InitialSalienceInsight, - SalienceLearning: cfg.MemoryDefaults.InitialSalienceLearning, - SalienceHandoff: cfg.MemoryDefaults.InitialSalienceHandoff, - FeedbackStrengthDelta: cfg.MemoryDefaults.FeedbackStrengthDelta, - FeedbackSalienceBoost: cfg.MemoryDefaults.FeedbackSalienceBoost, - } - server := mcp.NewMCPServer(db, retriever, bus, log, Version, cfg.Coaching.CoachingFile, cfg.Perception.Filesystem.ExcludePatterns, cfg.Perception.Filesystem.MaxContentBytes, mcpResolver, daemonURL, memDefaults) - - // Handle signal for graceful shutdown - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, shutdownSignals()...) 
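
This Notify call, together with the small goroutine just below that forwards the
signal into cancel(), is the hand-rolled form of what signal.NotifyContext has
offered since Go 1.16: a context cancelled on the first matching signal. The
condensed equivalent, as a sketch:

    package main

    import (
        "context"
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    func main() {
        // ctx is cancelled on the first SIGINT/SIGTERM; stop releases the watcher.
        ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
        defer stop()

        <-ctx.Done() // stand-in for server.Run(ctx); send Ctrl-C to proceed
        fmt.Println("shutting down:", ctx.Err())
    }
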
- go func() { - <-sigChan - cancel() - }() - - if err := server.Run(ctx); err != nil { - fmt.Fprintf(os.Stderr, "MCP server error: %v\n", err) - os.Exit(1) - } -} - -// ============================================================================ -// Usage -// ============================================================================ - -// printUsage prints the command usage. -func printUsage() { - usage := `mnemonic v%s - A semantic memory system daemon - -USAGE: - mnemonic [OPTIONS] [COMMAND] - -OPTIONS: - --config PATH Path to config.yaml (default: "config.yaml") - --help Show this help message - -DAEMON COMMANDS: - start Start the mnemonic daemon (background) - stop Stop the running daemon - restart Restart the daemon - serve Run in foreground (for debugging) - -MEMORY COMMANDS: - remember TEXT Store text in memory - recall QUERY Retrieve memories matching query - consolidate Run memory consolidation cycle - -DATA MANAGEMENT: - ingest DIR Bulk-ingest a directory (--dry-run, --project NAME) - export Export memories (--format json|sqlite, --output path) - import FILE Import from JSON export (--mode merge|replace) - backup Timestamped backup with retention (keeps last 5) - restore FILE Restore database from a SQLite backup file - cleanup Remove noise: mark excluded-path raw events as processed (--yes) - purge Stop daemon and delete all data (fresh start) - insights Show metacognition observations (memory health) - meta-cycle Run a single metacognition analysis cycle - dream-cycle Run a single dream replay cycle - -AI AGENT INTEGRATION: - mcp Run MCP server on stdin/stdout (for AI agents) - -MONITORING COMMANDS: - status Show comprehensive system status - diagnose Run health checks (config, DB, LLM, disk) - watch Live stream of daemon events - -UPDATE COMMANDS: - check-update Check if a newer version is available - update Download and install the latest version - -SETUP COMMANDS: - install Install as system service (auto-start on login) - uninstall Remove system service - generate-token Generate a random API authentication token - version Show version - -EXAMPLES: - mnemonic start Start daemon - mnemonic status Check everything - mnemonic watch Live event stream - mnemonic remember "I learned something today" Store a memory - mnemonic recall "important lessons" Retrieve memories - mnemonic ingest ~/Projects/myapp --project myapp Ingest a project - mnemonic export --format json Export all data - mnemonic backup Quick backup - mnemonic insights Memory health report - mnemonic dream-cycle Run dream replay - mnemonic mcp Start MCP server (stdio) - mnemonic install Auto-start on boot - mnemonic autopilot Autonomous activity log - mnemonic restore ~/.mnemonic/backups/backup.db Restore from backup +EXAMPLES: + mnemonic start Start daemon + mnemonic status Check everything + mnemonic watch Live event stream + mnemonic remember "I learned something today" Store a memory + mnemonic recall "important lessons" Retrieve memories + mnemonic ingest ~/Projects/myapp --project myapp Ingest a project + mnemonic export --format json Export all data + mnemonic backup Quick backup + mnemonic insights Memory health report + mnemonic dream-cycle Run dream replay + mnemonic mcp Start MCP server (stdio) + mnemonic install Auto-start on boot + mnemonic autopilot Autonomous activity log + mnemonic restore ~/.mnemonic/backups/backup.db Restore from backup EXIT CODES: 0 Success @@ -2861,547 +239,3 @@ EXIT CODES: ` fmt.Printf(usage, Version) } - -// autopilotCommand shows what the system has been doing autonomously. 
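
autopilotCommand, below, treats the health report as optional input: a read or
parse failure just falls through to "No health report found". One caveat worth
noting: the command hard-codes ~/.mnemonic/health.json, while the daemon writes
the report next to the configured database (see HealthReportPath earlier in this
file), so the two locations can disagree when store.db_path is customized. The
tolerant-read shape in isolation (struct fields and JSON tags are illustrative;
the real HealthReport lives in the orchestrator package):

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
        "path/filepath"
    )

    // healthReport mirrors a couple of fields for illustration only.
    type healthReport struct {
        Uptime      string `json:"uptime"`
        MemoryCount int    `json:"memory_count"`
    }

    func main() {
        home, _ := os.UserHomeDir()
        data, err := os.ReadFile(filepath.Join(home, ".mnemonic", "health.json"))
        if err != nil {
            fmt.Println("No health report found. Start the daemon to generate one.")
            return
        }
        var r healthReport
        if json.Unmarshal(data, &r) == nil { // a malformed report is silently skipped
            fmt.Printf("Uptime: %s, memories: %d\n", r.Uptime, r.MemoryCount)
        }
    }
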
-func autopilotCommand(configPath string) { - _, db, _, _ := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - // Read health report - homeDir, _ := os.UserHomeDir() - healthPath := filepath.Join(homeDir, ".mnemonic", "health.json") - data, err := os.ReadFile(healthPath) - - fmt.Println("=== Mnemonic Autopilot Report ===") - fmt.Println() - - if err == nil { - var report orchestrator.HealthReport - if json.Unmarshal(data, &report) == nil { - fmt.Printf("Last report: %s\n", report.Timestamp.Format("2006-01-02 15:04:05")) - fmt.Printf("Uptime: %s\n", report.Uptime) - fmt.Printf("LLM available: %v\n", report.LLMAvailable) - fmt.Printf("Store healthy: %v\n", report.StoreHealthy) - fmt.Printf("Memories: %d\n", report.MemoryCount) - fmt.Printf("Patterns: %d\n", report.PatternCount) - fmt.Printf("Abstractions: %d\n", report.AbstractionCount) - fmt.Printf("Last consolidation: %s\n", report.LastConsolidation) - fmt.Printf("Autonomous actions: %d\n", report.AutonomousActions) - - if len(report.Warnings) > 0 { - fmt.Println() - fmt.Println("Warnings:") - for _, w := range report.Warnings { - fmt.Printf(" - %s\n", w) - } - } - } - } else { - fmt.Println("No health report found. Start the daemon to generate one.") - } - - // Show recent autonomous actions - fmt.Println() - fmt.Println("--- Recent Autonomous Actions ---") - actions, err := db.ListMetaObservations(ctx, "autonomous_action", 10) - if err == nil && len(actions) > 0 { - for _, a := range actions { - action := "" - if act, ok := a.Details["action"].(string); ok { - action = act - } - fmt.Printf(" [%s] %s (severity: %s)\n", - a.CreatedAt.Format("2006-01-02 15:04"), action, a.Severity) - } - } else { - fmt.Println(" No autonomous actions recorded yet.") - } - - // Show recent patterns discovered - fmt.Println() - fmt.Println("--- Discovered Patterns ---") - patterns, err := db.ListPatterns(ctx, "", 5) - if err == nil && len(patterns) > 0 { - for _, p := range patterns { - project := "" - if p.Project != "" { - project = fmt.Sprintf(" [%s]", p.Project) - } - fmt.Printf(" %s%s: %s (strength: %.2f, evidence: %d)\n", - p.Title, project, p.Description, p.Strength, len(p.EvidenceIDs)) - } - } else { - fmt.Println(" No patterns discovered yet.") - } - - // Show abstractions - fmt.Println() - fmt.Println("--- Abstractions ---") - hasAbstractions := false - for _, level := range []int{2, 3} { - abs, err := db.ListAbstractions(ctx, level, 5) - if err == nil && len(abs) > 0 { - hasAbstractions = true - for _, a := range abs { - levelLabel := "principle" - if a.Level == 3 { - levelLabel = "axiom" - } - fmt.Printf(" [%s] %s: %s (confidence: %.2f)\n", - levelLabel, a.Title, a.Description, a.Confidence) - } - } - } - if !hasAbstractions { - fmt.Println(" No abstractions generated yet.") - } - - fmt.Println() -} - -// buildEncodingConfig translates central config into the encoding agent's config struct. 
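
buildEncodingConfig, next, normalizes settings on the way through: a
non-positive polling interval becomes 5s and a non-positive similarity threshold
becomes 0.3, so zero values in config.yaml read as "use the default". The guard
generalizes to a one-liner with Go 1.18+ generics (a hypothetical helper, shown
only to name the pattern):

    package main

    import (
        "fmt"
        "time"
    )

    // orDefault returns v unless it is non-positive, in which case def is used.
    // It mirrors the inline guards in buildEncodingConfig.
    func orDefault[T int | float32 | time.Duration](v, def T) T {
        if v <= 0 {
            return def
        }
        return v
    }

    func main() {
        fmt.Println(orDefault(0*time.Second, 5*time.Second)) // 5s
        fmt.Println(orDefault(float32(0), 0.3))              // 0.3
        fmt.Println(orDefault(2, 10))                        // 2
    }
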
-func buildEncodingConfig(cfg *config.Config) encoding.EncodingConfig { - pollingInterval := time.Duration(cfg.Encoding.PollingIntervalSec) * time.Second - if pollingInterval <= 0 { - pollingInterval = 5 * time.Second - } - simThreshold := float32(cfg.Encoding.SimilarityThreshold) - if simThreshold <= 0 { - simThreshold = 0.3 - } - return encoding.EncodingConfig{ - PollingInterval: pollingInterval, - SimilarityThreshold: simThreshold, - MaxSimilarSearchResults: cfg.Encoding.FindSimilarLimit, - CompletionMaxTokens: cfg.Encoding.CompletionMaxTokens, - CompletionTemperature: float32(cfg.LLM.Temperature), - MaxConcurrentEncodings: cfg.Encoding.MaxConcurrentEncodings, - EnableLLMClassification: cfg.Encoding.EnableLLMClassification, - CoachingFile: cfg.Coaching.CoachingFile, - ExcludePatterns: cfg.Perception.Filesystem.ExcludePatterns, - ConceptVocabulary: cfg.Encoding.ConceptVocabulary, - MaxRetries: cfg.Encoding.MaxRetries, - MaxLLMContentChars: cfg.Encoding.MaxLLMContentChars, - MaxEmbeddingChars: cfg.Encoding.MaxEmbeddingChars, - TemporalWindowMin: cfg.Encoding.TemporalWindowMin, - BackoffThreshold: cfg.Encoding.BackoffThreshold, - BackoffBaseSec: cfg.Encoding.BackoffBaseSec, - BackoffMaxSec: cfg.Encoding.BackoffMaxSec, - BatchSizeEvent: cfg.Encoding.BatchSizeEvent, - BatchSizePoll: cfg.Encoding.BatchSizePoll, - DeduplicationThreshold: float32(cfg.Encoding.DeduplicationThreshold), - SalienceFloor: cfg.Encoding.SalienceFloor, - } -} - -// newLLMProvider creates the appropriate LLM provider based on config. -// For "api" (default), it creates an LMStudioProvider for OpenAI-compatible APIs. -// For "embedded", it creates an EmbeddedProvider for in-process llama.cpp inference. -func newLLMProvider(cfg *config.Config) llm.Provider { - switch cfg.LLM.Provider { - case "embedded": - ep := llm.NewEmbeddedProvider(llm.EmbeddedProviderConfig{ - ModelsDir: cfg.LLM.Embedded.ModelsDir, - ChatModelFile: cfg.LLM.Embedded.ChatModelFile, - EmbedModelFile: cfg.LLM.Embedded.EmbedModelFile, - ContextSize: cfg.LLM.Embedded.ContextSize, - GPULayers: cfg.LLM.Embedded.GPULayers, - Threads: cfg.LLM.Embedded.Threads, - BatchSize: cfg.LLM.Embedded.BatchSize, - MaxTokens: cfg.LLM.MaxTokens, - Temperature: float32(cfg.LLM.Temperature), - MaxConcurrent: cfg.LLM.MaxConcurrent, - }) - backend := llamacpp.NewBackend() - if backend != nil { - if err := ep.LoadModels(func() llm.Backend { - return llamacpp.NewBackend() - }); err != nil { - slog.Error("failed to load embedded models", "error", err) - } - } else { - slog.Warn("embedded provider selected but llama.cpp not compiled in (build with: make build-embedded)") - } - return ep - default: // "api" or "" - timeout := time.Duration(cfg.LLM.TimeoutSec) * time.Second - if timeout == 0 { - timeout = 30 * time.Second - } - return llm.NewLMStudioProvider( - cfg.LLM.Endpoint, - cfg.LLM.ChatModel, - cfg.LLM.EmbeddingModel, - cfg.LLM.APIKey, - timeout, - cfg.LLM.MaxConcurrent, - ) - } -} - -// dedupCommand scans active memories for near-duplicate clusters and archives duplicates. -// With --apply it modifies the DB; without it, it's a dry-run that reports what would change. -func dedupCommand(configPath string, dryRun bool) { - cfg, db, _, log := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - threshold := float32(cfg.Encoding.DeduplicationThreshold) - if threshold <= 0 { - threshold = 0.9 - } - - if dryRun { - fmt.Printf("Dedup dry-run (threshold: %.2f). 
Use --apply to execute.\n\n", threshold) - } else { - fmt.Printf("Dedup (threshold: %.2f). Archiving duplicates...\n\n", threshold) - } - - // Load all active memories in pages - var allMemories []store.Memory - offset := 0 - pageSize := 200 - for { - page, err := db.ListMemories(ctx, "active", pageSize, offset) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to load memories: %v\n", err) - os.Exit(1) - } - allMemories = append(allMemories, page...) - if len(page) < pageSize { - break - } - offset += pageSize - } - - // Filter to memories with embeddings - var withEmbeddings []store.Memory - for _, m := range allMemories { - if len(m.Embedding) > 0 { - withEmbeddings = append(withEmbeddings, m) - } - } - - fmt.Printf("Active memories: %d (%d with embeddings)\n", len(allMemories), len(withEmbeddings)) - - // Union-find clustering: for each pair above threshold, merge clusters - clusterOf := make(map[string]string) // memory ID → cluster representative ID - for i := range withEmbeddings { - clusterOf[withEmbeddings[i].ID] = withEmbeddings[i].ID - } - - // Find root of cluster (with path compression) - var find func(string) string - find = func(id string) string { - if clusterOf[id] != id { - clusterOf[id] = find(clusterOf[id]) - } - return clusterOf[id] - } - - // Union two IDs into the same cluster - union := func(a, b string) { - ra, rb := find(a), find(b) - if ra != rb { - clusterOf[ra] = rb - } - } - - // O(n^2) pairwise comparison — fine for <1000 memories - comparisons := 0 - for i := 0; i < len(withEmbeddings); i++ { - for j := i + 1; j < len(withEmbeddings); j++ { - sim := agentutil.CosineSimilarity(withEmbeddings[i].Embedding, withEmbeddings[j].Embedding) - comparisons++ - if sim >= threshold { - union(withEmbeddings[i].ID, withEmbeddings[j].ID) - } - } - } - - // Build clusters - clusters := make(map[string][]store.Memory) // representative ID → members - for _, m := range withEmbeddings { - root := find(m.ID) - clusters[root] = append(clusters[root], m) - } - - // Filter to clusters with more than 1 member (actual duplicates) - dupClusters := 0 - totalDups := 0 - totalArchived := 0 - totalAssocTransferred := 0 - - for _, members := range clusters { - if len(members) <= 1 { - continue - } - dupClusters++ - totalDups += len(members) - - // Pick survivor: highest salience, then most recently accessed, then newest - survivor := members[0] - for _, m := range members[1:] { - if m.Salience > survivor.Salience { - survivor = m - } else if m.Salience == survivor.Salience && m.LastAccessed.After(survivor.LastAccessed) { - survivor = m - } else if m.Salience == survivor.Salience && m.LastAccessed.Equal(survivor.LastAccessed) && m.CreatedAt.After(survivor.CreatedAt) { - survivor = m - } - } - - fmt.Printf("Cluster (%d members):\n", len(members)) - fmt.Printf(" Survivor: %s (salience=%.2f) %s\n", survivor.ID[:8], survivor.Salience, truncate(survivor.Summary, 60)) - for _, m := range members { - if m.ID == survivor.ID { - continue - } - fmt.Printf(" Archive: %s (salience=%.2f) %s\n", m.ID[:8], m.Salience, truncate(m.Summary, 60)) - - if !dryRun { - // Transfer associations from archived memory to survivor - assocs, err := db.GetAssociations(ctx, m.ID) - if err != nil { - log.Warn("failed to get associations", "memory_id", m.ID, "error", err) - } else { - for _, a := range assocs { - targetID := a.TargetID - if targetID == m.ID { - targetID = a.SourceID - } - if targetID == survivor.ID { - continue // skip self-association - } - newAssoc := store.Association{ - SourceID: survivor.ID, - 
TargetID: targetID, - Strength: a.Strength, - RelationType: a.RelationType, - CreatedAt: a.CreatedAt, - LastActivated: a.LastActivated, - } - if err := db.CreateAssociation(ctx, newAssoc); err != nil { - // Likely duplicate — ignore - log.Debug("association transfer skipped (likely exists)", "source", survivor.ID[:8], "target", targetID[:8]) - } else { - totalAssocTransferred++ - } - } - } - - // Archive the duplicate - if err := db.UpdateState(ctx, m.ID, "archived"); err != nil { - log.Warn("failed to archive duplicate", "memory_id", m.ID, "error", err) - } else { - totalArchived++ - } - } - } - fmt.Println() - } - - fmt.Printf("Summary:\n") - fmt.Printf(" Comparisons: %d\n", comparisons) - fmt.Printf(" Dup clusters: %d (%d memories)\n", dupClusters, totalDups) - if dryRun { - fmt.Printf(" Would archive: %d memories\n", totalDups-dupClusters) - fmt.Printf("\nRun with --apply to execute.\n") - } else { - fmt.Printf(" Archived: %d memories\n", totalArchived) - fmt.Printf(" Associations: %d transferred\n", totalAssocTransferred) - - // Clean up dangling associations pointing to archived memories - pruned, err := db.PruneOrphanedAssociations(ctx) - if err != nil { - log.Warn("failed to prune orphaned associations", "error", err) - } else { - fmt.Printf(" Orphaned assocs pruned: %d\n", pruned) - } - } -} - -// resetPatternsCommand recalculates pattern strengths using logarithmic scaling -// and merges near-duplicate patterns. Dry-run by default; use --apply to execute. -func resetPatternsCommand(configPath string, dryRun bool) { - _, db, _, log := initRuntime(configPath) - defer func() { _ = db.Close() }() - - ctx := context.Background() - - // Load all patterns (no project filter, high limit) - patterns, err := db.ListPatterns(ctx, "", 1000) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to load patterns: %v\n", err) - os.Exit(1) - } - - if dryRun { - fmt.Printf("Pattern reset dry-run. Use --apply to execute.\n\n") - } else { - fmt.Printf("Pattern reset. 
Recalculating strengths and merging duplicates...\n\n") - } - - fmt.Printf("Total patterns: %d\n\n", len(patterns)) - - // Phase 1: Recalculate strengths using logarithmic formula - strengthCeiling := float32(0.95) - strongCeiling := float32(1.0) - strongMinCount := 50 - - fmt.Printf("=== Strength Recalculation ===\n") - fmt.Printf("Formula: 0.5 + 0.03 * log2(1 + evidenceCount)\n") - fmt.Printf("Ceiling: %.2f (%.2f with %d+ evidence)\n\n", strengthCeiling, strongCeiling, strongMinCount) - - recalculated := 0 - for i := range patterns { - p := &patterns[i] - if p.State != "active" { - continue - } - evidenceCount := len(p.EvidenceIDs) - newStrength := float32(0.5) + 0.03*float32(math.Log2(1+float64(evidenceCount))) - ceiling := strengthCeiling - if evidenceCount > strongMinCount { - ceiling = strongCeiling - } - if newStrength > ceiling { - newStrength = ceiling - } - if newStrength != p.Strength { - fmt.Printf(" %-50s evidence=%3d %.2f -> %.2f\n", - truncate(p.Title, 50), evidenceCount, p.Strength, newStrength) - if !dryRun { - p.Strength = newStrength - p.UpdatedAt = time.Now() - if err := db.UpdatePattern(ctx, *p); err != nil { - log.Warn("failed to update pattern strength", "pattern_id", p.ID, "error", err) - } - } - recalculated++ - } - } - fmt.Printf("\nRecalculated: %d patterns\n\n", recalculated) - - // Phase 2: Merge near-duplicate patterns (>0.80 cosine similarity) - const mergeThreshold = float32(0.80) - fmt.Printf("=== Duplicate Pattern Merge (threshold: %.2f) ===\n\n", mergeThreshold) - - // Filter to active patterns with embeddings - var active []int - for i, p := range patterns { - if p.State == "active" && len(p.Embedding) > 0 { - active = append(active, i) - } - } - - // Union-find for pattern clustering - parent := make(map[int]int) - for _, i := range active { - parent[i] = i - } - var findRoot func(int) int - findRoot = func(i int) int { - if parent[i] != i { - parent[i] = findRoot(parent[i]) - } - return parent[i] - } - - for ai := 0; ai < len(active); ai++ { - for bi := ai + 1; bi < len(active); bi++ { - i, j := active[ai], active[bi] - sim := agentutil.CosineSimilarity(patterns[i].Embedding, patterns[j].Embedding) - if sim >= mergeThreshold { - ri, rj := findRoot(i), findRoot(j) - if ri != rj { - parent[ri] = rj - } - } - } - } - - // Build clusters - clusters := make(map[int][]int) - for _, i := range active { - root := findRoot(i) - clusters[root] = append(clusters[root], i) - } - - merged := 0 - for _, members := range clusters { - if len(members) <= 1 { - continue - } - - // Pick survivor: most evidence, then highest strength - survivorIdx := members[0] - for _, idx := range members[1:] { - if len(patterns[idx].EvidenceIDs) > len(patterns[survivorIdx].EvidenceIDs) { - survivorIdx = idx - } else if len(patterns[idx].EvidenceIDs) == len(patterns[survivorIdx].EvidenceIDs) && - patterns[idx].Strength > patterns[survivorIdx].Strength { - survivorIdx = idx - } - } - - survivor := &patterns[survivorIdx] - fmt.Printf("Cluster (%d patterns):\n", len(members)) - fmt.Printf(" Survivor: %s (evidence=%d)\n", truncate(survivor.Title, 60), len(survivor.EvidenceIDs)) - - for _, idx := range members { - if idx == survivorIdx { - continue - } - dup := &patterns[idx] - fmt.Printf(" Archive: %s (evidence=%d)\n", truncate(dup.Title, 60), len(dup.EvidenceIDs)) - - if !dryRun { - // Merge evidence IDs into survivor - existingEvidence := make(map[string]bool) - for _, eid := range survivor.EvidenceIDs { - existingEvidence[eid] = true - } - for _, eid := range dup.EvidenceIDs { - if 
!existingEvidence[eid] { - survivor.EvidenceIDs = append(survivor.EvidenceIDs, eid) - } - } - survivor.UpdatedAt = time.Now() - if err := db.UpdatePattern(ctx, *survivor); err != nil { - log.Warn("failed to update survivor pattern", "id", survivor.ID, "error", err) - } - - // Archive the duplicate - dup.State = "archived" - dup.UpdatedAt = time.Now() - if err := db.UpdatePattern(ctx, *dup); err != nil { - log.Warn("failed to archive duplicate pattern", "id", dup.ID, "error", err) - } - } - merged++ - } - fmt.Println() - } - - fmt.Printf("Summary:\n") - fmt.Printf(" Strengths recalculated: %d\n", recalculated) - if dryRun { - fmt.Printf(" Would merge: %d duplicate patterns\n", merged) - fmt.Printf("\nRun with --apply to execute.\n") - } else { - fmt.Printf(" Patterns merged: %d\n", merged) - } -} - - -// truncate shortens a string to maxLen with ellipsis. -func truncate(s string, maxLen int) string { - if len(s) <= maxLen { - return s - } - return s[:maxLen-3] + "..." -} diff --git a/cmd/mnemonic/memory_cli.go b/cmd/mnemonic/memory_cli.go new file mode 100644 index 00000000..1b6a5d91 --- /dev/null +++ b/cmd/mnemonic/memory_cli.go @@ -0,0 +1,182 @@ +package main + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/appsprout-dev/mnemonic/internal/agent/consolidation" + "github.com/appsprout-dev/mnemonic/internal/agent/encoding" + "github.com/appsprout-dev/mnemonic/internal/agent/retrieval" + "github.com/appsprout-dev/mnemonic/internal/daemon" + "github.com/appsprout-dev/mnemonic/internal/events" + "github.com/appsprout-dev/mnemonic/internal/store" + + "github.com/google/uuid" +) + +// rememberCommand stores text in the memory system. +// If the daemon is running, it writes the raw memory to the DB and notifies the +// daemon via API so the daemon's own encoding agent picks it up (no duplicate encoder). +// If the daemon is NOT running, it spins up a local encoder and waits for it to finish. +func rememberCommand(configPath, text string) { + const maxRememberBytes = 10240 // 10KB + if len(text) > maxRememberBytes { + fmt.Fprintf(os.Stderr, "Error: input too large (%d bytes, max %d). Pipe large content through 'mnemonic ingest' instead.\n", len(text), maxRememberBytes) + os.Exit(1) + } + + cfg, db, llmProvider, log := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + // Write raw memory + raw := store.RawMemory{ + ID: uuid.New().String(), + Timestamp: time.Now(), + Source: "user", + Type: "explicit", + Content: text, + InitialSalience: 0.7, + CreatedAt: time.Now(), + } + if err := db.WriteRaw(ctx, raw); err != nil { + fmt.Fprintf(os.Stderr, "Error writing raw memory: %v\n", err) + os.Exit(1) + } + + // If daemon is running, just write raw and let the daemon's encoder handle it. + // The daemon's encoding agent polls for unprocessed raw memories every 5s. 
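+	// Either way, the completion contract (assumed from the store API used
+	// below) is the same: the encoder flips the raw row's Processed flag once
+	// a memory has been encoded, so callers can poll for completion:
+	//
+	//	r, err := db.GetRaw(ctx, raw.ID)
+	//	done := err == nil && r.Processed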
+ if running, _ := daemon.IsRunning(); running { + fmt.Printf("Remembered: %s\n", text) + fmt.Printf(" (daemon is running — encoding will happen automatically)\n") + return + } + + // Daemon not running — spin up a local encoder with a generous timeout + fmt.Printf("Encoding locally (daemon not running)...\n") + + timeoutSec := cfg.LLM.TimeoutSec + if timeoutSec < 60 { + timeoutSec = 60 + } + encodeCtx, encodeCancel := context.WithTimeout(ctx, time.Duration(timeoutSec)*time.Second) + defer encodeCancel() + + bus := events.NewInMemoryBus(100) + defer func() { _ = bus.Close() }() + + encoder := encoding.NewEncodingAgentWithConfig(db, llmProvider, log, buildEncodingConfig(cfg)) + if err := encoder.Start(encodeCtx, bus); err != nil { + fmt.Fprintf(os.Stderr, "Error starting encoder: %v\n", err) + os.Exit(1) + } + + // Publish event to trigger encoding + _ = bus.Publish(encodeCtx, events.RawMemoryCreated{ + ID: raw.ID, + Source: raw.Source, + Salience: raw.InitialSalience, + Ts: raw.Timestamp, + }) + + // Poll until the raw memory is marked processed or we time out + deadline := time.After(time.Duration(timeoutSec) * time.Second) + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + encoded := false + for !encoded { + select { + case <-deadline: + fmt.Fprintf(os.Stderr, "Warning: encoding timed out after %ds\n", timeoutSec) + encoded = true + case <-ticker.C: + r, err := db.GetRaw(ctx, raw.ID) + if err == nil && r.Processed { + encoded = true + } + } + } + + _ = encoder.Stop() + fmt.Printf("Remembered: %s\n", text) +} + +// recallCommand retrieves memories matching a query. +func recallCommand(configPath, query string) { + cfg, db, llmProvider, log := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + retriever := retrieval.NewRetrievalAgent(db, llmProvider, buildRetrievalConfig(cfg), log, nil) + + resp, err := retriever.Query(ctx, retrieval.QueryRequest{ + Query: query, + Synthesize: true, + }) + if err != nil { + fmt.Fprintf(os.Stderr, "Error recalling: %v\n", err) + os.Exit(1) + } + + if len(resp.Memories) == 0 { + fmt.Println("No memories found.") + return + } + + fmt.Printf("Found %d memories (took %dms):\n\n", len(resp.Memories), resp.TookMs) + for i, result := range resp.Memories { + fmt.Printf(" %d. [%.2f] %s\n", i+1, result.Score, result.Memory.Summary) + if result.Memory.Content != "" && result.Memory.Content != result.Memory.Summary { + fmt.Printf(" %s\n", result.Memory.Content) + } + fmt.Println() + } + + if resp.Synthesis != "" { + fmt.Printf("Synthesis:\n %s\n", resp.Synthesis) + } +} + +// consolidateCommand runs a single memory consolidation cycle. 
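+// It blocks until the cycle finishes, publishes a ConsolidationCompleted
+// event, and prints the report counters.
+//
+// Example session (hypothetical numbers; actual counters depend on the
+// database state):
+//
+//	$ mnemonic consolidate
+//	Running consolidation cycle...
+//	Consolidation complete (412ms):
+//	  Memories processed: 128
+//	  Salience decayed: 17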
+func consolidateCommand(configPath string) {
+ cfg, db, llmProvider, log := initRuntime(configPath)
+ defer func() { _ = db.Close() }()
+
+ ctx := context.Background()
+ bus := events.NewInMemoryBus(100)
+ defer func() { _ = bus.Close() }()
+
+ consolidator := consolidation.NewConsolidationAgent(db, llmProvider, toConsolidationConfig(cfg), log)
+
+ fmt.Println("Running consolidation cycle...")
+
+ report, err := consolidator.RunOnce(ctx)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Consolidation failed: %v\n", err)
+ os.Exit(1)
+ }
+
+ // Publish events for dashboard
+ _ = bus.Publish(ctx, events.ConsolidationCompleted{
+ DurationMs: report.Duration.Milliseconds(),
+ MemoriesProcessed: report.MemoriesProcessed,
+ MemoriesDecayed: report.MemoriesDecayed,
+ MergedClusters: report.MergesPerformed,
+ AssociationsPruned: report.AssociationsPruned,
+ Ts: time.Now(),
+ })
+
+ fmt.Printf("Consolidation complete (%dms):\n", report.Duration.Milliseconds())
+ fmt.Printf(" Memories processed: %d\n", report.MemoriesProcessed)
+ fmt.Printf(" Salience decayed: %d\n", report.MemoriesDecayed)
+ fmt.Printf(" Transitioned fading: %d\n", report.TransitionedFading)
+ fmt.Printf(" Transitioned archived: %d\n", report.TransitionedArchived)
+ fmt.Printf(" Associations pruned: %d\n", report.AssociationsPruned)
+ fmt.Printf(" Merges performed: %d\n", report.MergesPerformed)
+ fmt.Printf(" Expired deleted: %d\n", report.ExpiredDeleted)
+}
diff --git a/cmd/mnemonic/purge.go b/cmd/mnemonic/purge.go
new file mode 100644
index 00000000..761ae2cd
--- /dev/null
+++ b/cmd/mnemonic/purge.go
@@ -0,0 +1,175 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/appsprout-dev/mnemonic/internal/config"
+ "github.com/appsprout-dev/mnemonic/internal/daemon"
+)
+
+// purgeCommand stops the daemon, permanently deletes the database files, and starts fresh.
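+// For a store at ~/.mnemonic/memories.db (illustrative path), the files
+// removed are the main database plus its SQLite WAL-mode sidecars:
+//
+//	memories.db      // cfg.Store.DBPath
+//	memories.db-wal  // write-ahead log
+//	memories.db-shm  // shared-memory index for the WAL
+//
+// Log files are left in place; only database files are deleted.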
+func purgeCommand(configPath string) { + cfg, err := config.Load(configPath) + if err != nil { + die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") + } + + // Confirm with user + fmt.Printf("%sThis will permanently delete all memories and reset the database.%s\n", colorRed, colorReset) + fmt.Printf(" Database: %s\n", cfg.Store.DBPath) + fmt.Printf("\nType 'yes' to confirm: ") + + var confirmation string + _, _ = fmt.Scanln(&confirmation) + if confirmation != "yes" { + fmt.Println("Aborted.") + return + } + + // Stop daemon if running + if running, pid := daemon.IsRunning(); running { + fmt.Printf("Stopping daemon (PID %d)...\n", pid) + if err := daemon.Stop(); err != nil { + fmt.Fprintf(os.Stderr, "Warning: failed to stop daemon: %v\n", err) + fmt.Fprintf(os.Stderr, "Please stop it manually and try again.\n") + os.Exit(1) + } + time.Sleep(1 * time.Second) + } + + // Resolve DB path (handle ~ expansion) + dbPath := cfg.Store.DBPath + if strings.HasPrefix(dbPath, "~") { + home, _ := os.UserHomeDir() + dbPath = filepath.Join(home, dbPath[1:]) + } + + // Delete database file and WAL/SHM files + deleted := 0 + for _, suffix := range []string{"", "-wal", "-shm"} { + path := dbPath + suffix + if _, err := os.Stat(path); err == nil { + if err := os.Remove(path); err != nil { + fmt.Fprintf(os.Stderr, "Warning: failed to delete %s: %v\n", path, err) + } else { + deleted++ + } + } + } + + if deleted > 0 { + fmt.Printf("%sDatabase purged.%s Deleted %d file(s).\n", colorGreen, colorReset, deleted) + } else { + fmt.Printf("No database files found at %s (already clean).\n", dbPath) + } + + fmt.Println("\nThe database will be recreated automatically on next start.") + fmt.Printf(" mnemonic start\n") +} + +// cleanupCommand scans raw_memories for paths matching exclude patterns and +// bulk-marks them as processed, then archives any encoded memories derived from them. 
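+//
+// Flags (as parsed below): --yes/-y skips the confirmation prompt, and
+// --patterns additionally archives all active patterns and abstractions so
+// they regenerate from clean data. For example:
+//
+//	mnemonic cleanup --yes --patterns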
+func cleanupCommand(configPath string, args []string) { + cfg, db, _, _ := initRuntime(configPath) + defer func() { _ = db.Close() }() + + ctx := context.Background() + + patterns := cfg.Perception.Filesystem.ExcludePatterns + if len(patterns) == 0 { + fmt.Println("No exclude patterns configured in config.yaml — nothing to clean.") + return + } + + // Check for flags + autoConfirm := false + cleanPatterns := false + for _, a := range args { + if a == "--yes" || a == "-y" { + autoConfirm = true + } + if a == "--patterns" { + cleanPatterns = true + } + } + + // Count what would be cleaned + rawCount, err := db.CountRawUnprocessedByPathPatterns(ctx, patterns) + if err != nil { + fmt.Fprintf(os.Stderr, "Error counting raw memories: %v\n", err) + os.Exit(1) + } + + fmt.Printf("%sCleanup Summary%s\n", colorBold, colorReset) + fmt.Printf(" Exclude patterns: %d (from config.yaml)\n", len(patterns)) + fmt.Printf(" Unprocessed raw events: %s%d%s matching exclude patterns\n", colorYellow, rawCount, colorReset) + if cleanPatterns { + fmt.Printf(" --patterns flag: will archive all active patterns and abstractions\n") + } + + if rawCount == 0 && !cleanPatterns { + fmt.Println("\nNothing to clean up.") + return + } + + if !autoConfirm { + fmt.Printf("\nThis will mark matching raw events as processed and archive derived memories.\n") + if cleanPatterns { + fmt.Printf("It will also archive ALL active patterns and abstractions (they regenerate from clean data).\n") + } + fmt.Printf("Type 'yes' to confirm: ") + var confirmation string + _, _ = fmt.Scanln(&confirmation) + if confirmation != "yes" { + fmt.Println("Aborted.") + return + } + } + + rawCleaned := 0 + memArchived := 0 + + if rawCount > 0 { + // Mark raw events as processed + rawCleaned, err = db.BulkMarkRawProcessedByPathPatterns(ctx, patterns) + if err != nil { + fmt.Fprintf(os.Stderr, "Error cleaning raw memories: %v\n", err) + os.Exit(1) + } + + // Archive derived encoded memories + memArchived, err = db.ArchiveMemoriesByRawPathPatterns(ctx, patterns) + if err != nil { + fmt.Fprintf(os.Stderr, "Error archiving memories: %v\n", err) + os.Exit(1) + } + } + + patternsArchived := 0 + abstractionsArchived := 0 + if cleanPatterns { + patternsArchived, err = db.ArchiveAllPatterns(ctx) + if err != nil { + fmt.Fprintf(os.Stderr, "Error archiving patterns: %v\n", err) + os.Exit(1) + } + abstractionsArchived, err = db.ArchiveAllAbstractions(ctx) + if err != nil { + fmt.Fprintf(os.Stderr, "Error archiving abstractions: %v\n", err) + os.Exit(1) + } + } + + fmt.Printf("\n%sCleanup complete%s\n", colorGreen, colorReset) + fmt.Printf(" Raw events marked processed: %d\n", rawCleaned) + fmt.Printf(" Encoded memories archived: %d\n", memArchived) + if cleanPatterns { + fmt.Printf(" Patterns archived: %d\n", patternsArchived) + fmt.Printf(" Abstractions archived: %d\n", abstractionsArchived) + } +} diff --git a/cmd/mnemonic/restore.go b/cmd/mnemonic/restore.go new file mode 100644 index 00000000..a39eaadc --- /dev/null +++ b/cmd/mnemonic/restore.go @@ -0,0 +1,73 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/appsprout-dev/mnemonic/internal/backup" + "github.com/appsprout-dev/mnemonic/internal/config" + "github.com/appsprout-dev/mnemonic/internal/daemon" + "github.com/appsprout-dev/mnemonic/internal/store/sqlite" +) + +// restoreCommand restores the database from a SQLite backup file. 
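+// The sequence is: verify the backup opens as SQLite and passes an
+// integrity check, refuse to run while the daemon is up, move any existing
+// database aside with a ".pre-restore" suffix, then copy the backup into
+// place. Example invocation (illustrative backup path):
+//
+//	mnemonic restore ~/.mnemonic/backups/memories-2026-03-29.db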
+func restoreCommand(configPath string, backupPath string) { + cfg, err := config.Load(configPath) + if err != nil { + die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") + } + + // Verify backup file exists + info, err := os.Stat(backupPath) + if err != nil { + die(exitUsage, fmt.Sprintf("backup file not found: %s", backupPath), "check the file path") + } + if info.IsDir() { + die(exitUsage, fmt.Sprintf("%s is a directory, not a backup file", backupPath), "provide a .db file path") + } + + // Verify backup integrity by opening it as a SQLite database + fmt.Printf("Verifying backup integrity: %s\n", backupPath) + testStore, err := sqlite.NewSQLiteStore(backupPath, 5000) + if err != nil { + die(exitDatabase, fmt.Sprintf("backup is not a valid SQLite database: %v", err), "") + } + intCtx, intCancel := context.WithTimeout(context.Background(), 30*time.Second) + intErr := testStore.CheckIntegrity(intCtx) + intCancel() + _ = testStore.Close() + if intErr != nil { + die(exitDatabase, fmt.Sprintf("backup file is corrupted: %v", intErr), "") + } + fmt.Printf(" %s✓ Backup integrity verified%s\n", colorGreen, colorReset) + + // Check if daemon is running + svc := daemon.NewServiceManager() + if running, _ := svc.IsRunning(); running { + die(exitGeneral, "daemon is running", "mnemonic stop") + } + + // If current DB exists, move it aside + dbPath := cfg.Store.DBPath + if _, statErr := os.Stat(dbPath); statErr == nil { + aside := dbPath + ".pre-restore" + fmt.Printf(" Moving current database to %s\n", aside) + if err := os.Rename(dbPath, aside); err != nil { + die(exitPermission, fmt.Sprintf("moving current database: %v", err), "check file permissions") + } + } + + // Copy backup to DB path + _ = cfg.EnsureDataDir() + if err := backup.ExportSQLite(context.Background(), backupPath, dbPath); err != nil { + fmt.Fprintf(os.Stderr, "Error copying backup to database path: %v\n", err) + os.Exit(1) + } + + fmt.Printf("\n%s✓ Database restored from %s%s\n", colorGreen, filepath.Base(backupPath), colorReset) + fmt.Printf(" Database: %s (%.1f KB)\n", dbPath, float64(info.Size())/1024) + fmt.Printf(" Start the daemon with 'mnemonic start' or 'mnemonic serve'.\n") +} diff --git a/cmd/mnemonic/runtime.go b/cmd/mnemonic/runtime.go new file mode 100644 index 00000000..b65332d2 --- /dev/null +++ b/cmd/mnemonic/runtime.go @@ -0,0 +1,233 @@ +package main + +import ( + "fmt" + "log/slog" + "time" + + "github.com/appsprout-dev/mnemonic/internal/agent/consolidation" + "github.com/appsprout-dev/mnemonic/internal/agent/encoding" + "github.com/appsprout-dev/mnemonic/internal/agent/retrieval" + "github.com/appsprout-dev/mnemonic/internal/config" + "github.com/appsprout-dev/mnemonic/internal/llm" + "github.com/appsprout-dev/mnemonic/internal/llm/llamacpp" + "github.com/appsprout-dev/mnemonic/internal/logger" + "github.com/appsprout-dev/mnemonic/internal/store/sqlite" +) + +// buildRetrievalConfig maps the central config to the retrieval agent's config struct. 
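+// Config values arrive as float64 (from YAML parsing); the retrieval agent
+// computes in float32, so every weight and threshold is narrowed here.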
+func buildRetrievalConfig(cfg *config.Config) retrieval.RetrievalConfig { + return retrieval.RetrievalConfig{ + MaxHops: cfg.Retrieval.MaxHops, + ActivationThreshold: float32(cfg.Retrieval.ActivationThreshold), + DecayFactor: float32(cfg.Retrieval.DecayFactor), + MaxResults: cfg.Retrieval.MaxResults, + MaxToolCalls: cfg.Retrieval.MaxToolCalls, + SynthesisMaxTokens: cfg.Retrieval.SynthesisMaxTokens, + MergeAlpha: float32(cfg.Retrieval.MergeAlpha), + DualHitBonus: float32(cfg.Retrieval.DualHitBonus), + + FTSCandidateLimit: cfg.Retrieval.FTSCandidateLimit, + EmbeddingCandidateLimit: cfg.Retrieval.EmbeddingCandidateLimit, + PatternSearchLimit: cfg.Retrieval.PatternSearchLimit, + AbstractionSearchLimit: cfg.Retrieval.AbstractionSearchLimit, + + FTSRankWeight: float32(cfg.Retrieval.FTSRankWeight), + FTSSalienceWeight: float32(cfg.Retrieval.FTSSalienceWeight), + DefaultSalience: float32(cfg.Retrieval.DefaultSalience), + + TimeRangeBaseScore: float32(cfg.Retrieval.TimeRangeBaseScore), + TimeRangeSalienceWt: float32(cfg.Retrieval.TimeRangeSalienceWt), + + RecencyBoostWeight: float32(cfg.Retrieval.RecencyBoostWeight), + RecencyHalfLifeDays: float32(cfg.Retrieval.RecencyHalfLifeDays), + + ActivityBonusMax: float32(cfg.Retrieval.ActivityBonusMax), + ActivityBonusScale: float32(cfg.Retrieval.ActivityBonusScale), + + CriticalBoost: float32(cfg.Retrieval.CriticalBoost), + ImportantBoost: float32(cfg.Retrieval.ImportantBoost), + + DiversityLambda: float32(cfg.Retrieval.DiversityLambda), + DiversityThreshold: float32(cfg.Retrieval.DiversityThreshold), + + FeedbackWeight: float32(cfg.Retrieval.FeedbackWeight), + SourceWeights: convertSourceWeights(cfg.Retrieval.SourceWeights), + TypeWeights: convertSourceWeights(cfg.Retrieval.TypeWeights), + + ContextBoostWindowMin: cfg.Perception.RecallBoostWindowMin, + ContextBoostMax: float32(cfg.Perception.RecallBoostMax), + ContextBoostSources: convertContextBoostSources(cfg.Retrieval.ContextBoostSources), + } +} + +// convertContextBoostSources converts []string to map[string]bool. +func convertContextBoostSources(src []string) map[string]bool { + if src == nil { + return nil + } + out := make(map[string]bool, len(src)) + for _, s := range src { + out[s] = true + } + return out +} + +// convertSourceWeights converts map[string]float64 to map[string]float32. +func convertSourceWeights(src map[string]float64) map[string]float32 { + if src == nil { + return nil + } + out := make(map[string]float32, len(src)) + for k, v := range src { + out[k] = float32(v) + } + return out +} + +// initRuntime loads config, opens store and LLM for CLI commands. +// The returned Provider includes training data capture if enabled in config. 
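+//
+// Typical call pattern (as used by the CLI commands in this package):
+//
+//	cfg, db, provider, log := initRuntime(configPath)
+//	defer func() { _ = db.Close() }()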
+func initRuntime(configPath string) (*config.Config, *sqlite.SQLiteStore, llm.Provider, *slog.Logger) { + cfg, err := config.Load(configPath) + if err != nil { + die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") + } + + log, err := logger.New(logger.Config{Level: "warn", Format: "text"}) + if err != nil { + die(exitGeneral, fmt.Sprintf("initializing logger: %v", err), "") + } + + _ = cfg.EnsureDataDir() + + db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) + if err != nil { + die(exitDatabase, fmt.Sprintf("opening database: %v", err), "mnemonic diagnose") + } + + provider := newLLMProvider(cfg) + + // Wrap with training data capture if enabled + if cfg.Training.CaptureEnabled && cfg.Training.CaptureDir != "" { + provider = llm.NewTrainingCaptureProvider(provider, "cli", cfg.Training.CaptureDir) + } + + return cfg, db, provider, log +} + +// toConsolidationConfig converts the global config's consolidation settings to the agent's config. +func toConsolidationConfig(cfg *config.Config) consolidation.ConsolidationConfig { + return consolidation.ConsolidationConfig{ + Interval: cfg.Consolidation.Interval, + DecayRate: cfg.Consolidation.DecayRate, + FadeThreshold: cfg.Consolidation.FadeThreshold, + ArchiveThreshold: cfg.Consolidation.ArchiveThreshold, + RetentionWindow: cfg.Consolidation.RetentionWindow, + MaxMemoriesPerCycle: cfg.Consolidation.MaxMemoriesPerCycle, + MaxMergesPerCycle: cfg.Consolidation.MaxMergesPerCycle, + MinClusterSize: cfg.Consolidation.MinClusterSize, + AssocPruneThreshold: consolidation.DefaultConfig().AssocPruneThreshold, + RecencyProtection24h: cfg.Consolidation.RecencyProtection24h, + RecencyProtection168h: cfg.Consolidation.RecencyProtection168h, + AccessResistanceCap: cfg.Consolidation.AccessResistanceCap, + AccessResistanceScale: cfg.Consolidation.AccessResistanceScale, + MergeSimilarityThreshold: cfg.Consolidation.MergeSimilarityThreshold, + PatternMatchThreshold: cfg.Consolidation.PatternMatchThreshold, + PatternStrengthIncrement: float32(cfg.Consolidation.PatternStrengthIncrement), + PatternIncrementCap: float32(cfg.Consolidation.PatternIncrementCap), + LargeClusterBonus: float32(cfg.Consolidation.LargeClusterBonus), + LargeClusterMinSize: cfg.Consolidation.LargeClusterMinSize, + PatternStrengthCeiling: float32(cfg.Consolidation.PatternStrengthCeiling), + StrongEvidenceCeiling: float32(cfg.Consolidation.StrongEvidenceCeiling), + StrongEvidenceMinCount: cfg.Consolidation.StrongEvidenceMinCount, + PatternBaselineDecay: float32(cfg.Consolidation.PatternBaselineDecay), + StaleDecayHealthy: float32(cfg.Consolidation.StaleDecayHealthy), + StaleDecayModerate: float32(cfg.Consolidation.StaleDecayModerate), + StaleDecayAggressive: float32(cfg.Consolidation.StaleDecayAggressive), + SelfSustainingMinEvidence: cfg.Consolidation.SelfSustainingMinEvidence, + SelfSustainingMinStrength: float32(cfg.Consolidation.SelfSustainingMinStrength), + SelfSustainingDecay: float32(cfg.Consolidation.SelfSustainingDecay), + NeverRecalledArchiveDays: cfg.Consolidation.NeverRecalledArchiveDays, + StartupDelay: time.Duration(cfg.Consolidation.StartupDelaySec) * time.Second, + } +} + +// buildEncodingConfig translates central config into the encoding agent's config struct. 
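+// Non-positive values fall back to safe defaults: a 5s polling interval
+// and a 0.3 similarity threshold (see the guards below).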
+func buildEncodingConfig(cfg *config.Config) encoding.EncodingConfig { + pollingInterval := time.Duration(cfg.Encoding.PollingIntervalSec) * time.Second + if pollingInterval <= 0 { + pollingInterval = 5 * time.Second + } + simThreshold := float32(cfg.Encoding.SimilarityThreshold) + if simThreshold <= 0 { + simThreshold = 0.3 + } + return encoding.EncodingConfig{ + PollingInterval: pollingInterval, + SimilarityThreshold: simThreshold, + MaxSimilarSearchResults: cfg.Encoding.FindSimilarLimit, + CompletionMaxTokens: cfg.Encoding.CompletionMaxTokens, + CompletionTemperature: float32(cfg.LLM.Temperature), + MaxConcurrentEncodings: cfg.Encoding.MaxConcurrentEncodings, + EnableLLMClassification: cfg.Encoding.EnableLLMClassification, + CoachingFile: cfg.Coaching.CoachingFile, + ExcludePatterns: cfg.Perception.Filesystem.ExcludePatterns, + ConceptVocabulary: cfg.Encoding.ConceptVocabulary, + MaxRetries: cfg.Encoding.MaxRetries, + MaxLLMContentChars: cfg.Encoding.MaxLLMContentChars, + MaxEmbeddingChars: cfg.Encoding.MaxEmbeddingChars, + TemporalWindowMin: cfg.Encoding.TemporalWindowMin, + BackoffThreshold: cfg.Encoding.BackoffThreshold, + BackoffBaseSec: cfg.Encoding.BackoffBaseSec, + BackoffMaxSec: cfg.Encoding.BackoffMaxSec, + BatchSizeEvent: cfg.Encoding.BatchSizeEvent, + BatchSizePoll: cfg.Encoding.BatchSizePoll, + DeduplicationThreshold: float32(cfg.Encoding.DeduplicationThreshold), + SalienceFloor: cfg.Encoding.SalienceFloor, + } +} + +// newLLMProvider creates the appropriate LLM provider based on config. +// For "api" (default), it creates an LMStudioProvider for OpenAI-compatible APIs. +// For "embedded", it creates an EmbeddedProvider for in-process llama.cpp inference. +func newLLMProvider(cfg *config.Config) llm.Provider { + switch cfg.LLM.Provider { + case "embedded": + ep := llm.NewEmbeddedProvider(llm.EmbeddedProviderConfig{ + ModelsDir: cfg.LLM.Embedded.ModelsDir, + ChatModelFile: cfg.LLM.Embedded.ChatModelFile, + EmbedModelFile: cfg.LLM.Embedded.EmbedModelFile, + ContextSize: cfg.LLM.Embedded.ContextSize, + GPULayers: cfg.LLM.Embedded.GPULayers, + Threads: cfg.LLM.Embedded.Threads, + BatchSize: cfg.LLM.Embedded.BatchSize, + MaxTokens: cfg.LLM.MaxTokens, + Temperature: float32(cfg.LLM.Temperature), + MaxConcurrent: cfg.LLM.MaxConcurrent, + }) + backend := llamacpp.NewBackend() + if backend != nil { + if err := ep.LoadModels(func() llm.Backend { + return llamacpp.NewBackend() + }); err != nil { + slog.Error("failed to load embedded models", "error", err) + } + } else { + slog.Warn("embedded provider selected but llama.cpp not compiled in (build with: make build-embedded)") + } + return ep + default: // "api" or "" + timeout := time.Duration(cfg.LLM.TimeoutSec) * time.Second + if timeout == 0 { + timeout = 30 * time.Second + } + return llm.NewLMStudioProvider( + cfg.LLM.Endpoint, + cfg.LLM.ChatModel, + cfg.LLM.EmbeddingModel, + cfg.LLM.APIKey, + timeout, + cfg.LLM.MaxConcurrent, + ) + } +} diff --git a/cmd/mnemonic/serve.go b/cmd/mnemonic/serve.go new file mode 100644 index 00000000..97fa4806 --- /dev/null +++ b/cmd/mnemonic/serve.go @@ -0,0 +1,730 @@ +package main + +import ( + "context" + "fmt" + "log/slog" + "os" + "os/signal" + "path/filepath" + "runtime" + "syscall" + "time" + + "github.com/appsprout-dev/mnemonic/internal/agent/abstraction" + "github.com/appsprout-dev/mnemonic/internal/agent/consolidation" + "github.com/appsprout-dev/mnemonic/internal/agent/dreaming" + "github.com/appsprout-dev/mnemonic/internal/agent/encoding" + 
"github.com/appsprout-dev/mnemonic/internal/agent/episoding" + "github.com/appsprout-dev/mnemonic/internal/agent/metacognition" + "github.com/appsprout-dev/mnemonic/internal/agent/orchestrator" + "github.com/appsprout-dev/mnemonic/internal/agent/perception" + "github.com/appsprout-dev/mnemonic/internal/agent/reactor" + "github.com/appsprout-dev/mnemonic/internal/agent/retrieval" + "github.com/appsprout-dev/mnemonic/internal/api" + "github.com/appsprout-dev/mnemonic/internal/api/routes" + "github.com/appsprout-dev/mnemonic/internal/backup" + "github.com/appsprout-dev/mnemonic/internal/config" + "github.com/appsprout-dev/mnemonic/internal/daemon" + "github.com/appsprout-dev/mnemonic/internal/events" + "github.com/appsprout-dev/mnemonic/internal/llm" + "github.com/appsprout-dev/mnemonic/internal/logger" + "github.com/appsprout-dev/mnemonic/internal/mcp" + "github.com/appsprout-dev/mnemonic/internal/store" + "github.com/appsprout-dev/mnemonic/internal/store/sqlite" + "github.com/appsprout-dev/mnemonic/internal/updater" + "github.com/appsprout-dev/mnemonic/internal/watcher" + + clipwatcher "github.com/appsprout-dev/mnemonic/internal/watcher/clipboard" + fswatcher "github.com/appsprout-dev/mnemonic/internal/watcher/filesystem" + gitwatcher "github.com/appsprout-dev/mnemonic/internal/watcher/git" + termwatcher "github.com/appsprout-dev/mnemonic/internal/watcher/terminal" + + "github.com/google/uuid" +) + +// serveCommand runs the mnemonic daemon. +func serveCommand(configPath string) { + // If running as a Windows Service, delegate to the service handler. + if daemon.IsWindowsService() { + execPath, _ := os.Executable() + if err := daemon.RunAsService(execPath, configPath); err != nil { + die(exitGeneral, fmt.Sprintf("running as Windows service: %v", err), "") + } + return + } + + // Load configuration + cfg, err := config.Load(configPath) + if err != nil { + die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose") + } + + // Check config file permissions + if warn := config.WarnPermissions(configPath); warn != "" { + fmt.Fprintf(os.Stderr, "Warning: %s\n", warn) + } + + // Build project resolver from config + projectResolver := config.NewProjectResolver(cfg.Projects) + + // Initialize logger + log, err := logger.New(logger.Config{ + Level: cfg.Logging.Level, + Format: cfg.Logging.Format, + File: cfg.Logging.File, + }) + if err != nil { + die(exitConfig, fmt.Sprintf("initializing logger: %v", err), "check logging config in config.yaml") + } + slog.SetDefault(log) + + // Clean up leftover .old binary from a previous Windows update + if err := updater.CleanupOldBinary(); err != nil { + log.Warn("failed to clean up old binary after update", "error", err) + } + + // Create data directory if it doesn't exist + if err := cfg.EnsureDataDir(); err != nil { + die(exitPermission, fmt.Sprintf("creating data directory: %v", err), "check permissions on ~/.mnemonic/") + } + + // Pre-migration safety backup (only if DB exists AND schema is outdated) + if _, statErr := os.Stat(cfg.Store.DBPath); statErr == nil { + currentVer, verErr := backup.ReadSchemaVersion(cfg.Store.DBPath) + if verErr != nil { + log.Warn("could not read schema version, will back up defensively", "error", verErr) + currentVer = -1 // force backup + } + if currentVer < sqlite.SchemaVersion { + backupDir, bdErr := backup.EnsureBackupDir() + if bdErr != nil { + log.Warn("could not create backup directory for pre-migration backup", "error", bdErr) + } else { + bkPath, bkErr := backup.BackupSQLiteFile(cfg.Store.DBPath, 
backupDir)
+ if bkErr != nil {
+ log.Warn("pre-migration backup failed", "error", bkErr)
+ } else if bkPath != "" {
+ log.Info("pre-migration backup created", "path", bkPath)
+ }
+ if pruneErr := backup.PruneOldBackups(backupDir, 3); pruneErr != nil {
+ log.Warn("failed to prune old backups", "error", pruneErr)
+ }
+ }
+ } else {
+ log.Debug("schema is current, skipping pre-migration backup")
+ }
+ }
+
+ // Open SQLite store
+ memStore, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs)
+ if err != nil {
+ die(exitDatabase, fmt.Sprintf("opening database %s: %v", cfg.Store.DBPath, err), "mnemonic diagnose")
+ }
+
+ // Run integrity check on startup
+ intCtx, intCancel := context.WithTimeout(context.Background(), 30*time.Second)
+ if intErr := memStore.CheckIntegrity(intCtx); intErr != nil {
+ log.Error("database integrity check failed", "error", intErr)
+ fmt.Fprintf(os.Stderr, "\n%s✗ DATABASE CORRUPTION DETECTED%s\n", colorRed, colorReset)
+ fmt.Fprintf(os.Stderr, " %v\n", intErr)
+ fmt.Fprintf(os.Stderr, " A pre-migration backup was saved. Use 'mnemonic restore <backup-path>' to recover.\n\n")
+ } else {
+ log.Info("database integrity check passed")
+ }
+ intCancel()
+
+ // Check available disk space
+ dbDir := filepath.Dir(cfg.Store.DBPath)
+ if availBytes, diskErr := diskAvailable(dbDir); diskErr == nil {
+ availMB := availBytes / (1024 * 1024)
+ if availMB < 100 {
+ log.Error("critically low disk space", "available_mb", availMB, "path", dbDir)
+ fmt.Fprintf(os.Stderr, "\n%s✗ CRITICALLY LOW DISK SPACE: %d MB available%s\n", colorRed, availMB, colorReset)
+ fmt.Fprintf(os.Stderr, " Database writes may fail. Free up disk space before continuing.\n\n")
+ } else if availMB < 500 {
+ log.Warn("low disk space", "available_mb", availMB, "path", dbDir)
+ fmt.Fprintf(os.Stderr, "\n%s⚠ Low disk space: %d MB available%s\n", colorYellow, availMB, colorReset)
+ }
+ }
+
+ // Create LLM provider
+ llmProvider := newLLMProvider(cfg)
+
+ // Check for embedding model drift
+ embModel := cfg.LLM.EmbeddingModel
+ if cfg.LLM.Provider == "embedded" && cfg.LLM.Embedded.EmbedModelFile != "" {
+ embModel = cfg.LLM.Embedded.EmbedModelFile
+ }
+ if embModel != "" {
+ metaCtx, metaCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ prevModel, _ := memStore.GetMeta(metaCtx, "embedding_model")
+ metaCancel()
+
+ if prevModel != "" && prevModel != embModel {
+ log.Warn("embedding model changed", "previous", prevModel, "current", embModel)
+ fmt.Fprintf(os.Stderr, "\n%s⚠ Embedding model changed: %s → %s%s\n", colorYellow, prevModel, embModel, colorReset)
+ fmt.Fprintf(os.Stderr, " Existing semantic search may return degraded results.\n")
+ fmt.Fprintf(os.Stderr, " Old embeddings are from a different vector space.\n\n")
+ }
+
+ metaCtx2, metaCancel2 := context.WithTimeout(context.Background(), 5*time.Second)
+ _ = memStore.SetMeta(metaCtx2, "embedding_model", embModel)
+ metaCancel2()
+ }
+
+ // Detect version changes and create a memory for release awareness
+ if Version != "" {
+ verCtx, verCancel := context.WithTimeout(context.Background(), 5*time.Second)
+ prevVersion, _ := memStore.GetMeta(verCtx, "daemon_version")
+ verCancel()
+
+ if prevVersion != "" && prevVersion != Version {
+ log.Info("version changed", "previous", prevVersion, "current", Version)
+ raw := store.RawMemory{
+ ID: uuid.New().String(),
+ Source: "system",
+ Type: "version_change",
+ Content: fmt.Sprintf("Mnemonic updated from %s to %s", prevVersion, Version),
+ Timestamp: time.Now(),
+ Project: "mnemonic",
+
InitialSalience: 0.7, + } + writeCtx, writeCancel := context.WithTimeout(context.Background(), 5*time.Second) + if err := memStore.WriteRaw(writeCtx, raw); err != nil { + log.Warn("failed to record version change", "error", err) + } else { + log.Info("recorded version change memory", "from", prevVersion, "to", Version) + } + writeCancel() + } + + setCtx, setCancel := context.WithTimeout(context.Background(), 5*time.Second) + _ = memStore.SetMeta(setCtx, "daemon_version", Version) + setCancel() + } + + // Create event bus + bus := events.NewInMemoryBus(bufferSize) + defer func() { _ = bus.Close() }() + + // Check LLM health (warn loudly if unavailable, don't fail startup) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(cfg.LLM.TimeoutSec)*time.Second) + if err := llmProvider.Health(ctx); err != nil { + log.Warn("LLM provider unavailable at startup", "endpoint", cfg.LLM.Endpoint, "error", err) + fmt.Fprintf(os.Stderr, "\n%s⚠ WARNING: LLM provider is not reachable at %s%s\n", colorYellow, cfg.LLM.Endpoint, colorReset) + fmt.Fprintf(os.Stderr, " Memory encoding will not work until the LLM provider is running.\n") + fmt.Fprintf(os.Stderr, " Raw observations will queue and be processed once the LLM provider is available.\n") + fmt.Fprintf(os.Stderr, " Run 'mnemonic diagnose' for a full health check.\n\n") + } + cancel() + + // Log startup info + embCount, embLoadTime := memStore.EmbeddingIndexStats() + log.Info("mnemonic daemon starting", + "version", Version, + "config_path", configPath, + "db_path", cfg.Store.DBPath, + "llm_endpoint", cfg.LLM.Endpoint, + "llm_chat_model", cfg.LLM.ChatModel, + "llm_embedding_model", cfg.LLM.EmbeddingModel, + "embedding_index_size", embCount, + "embedding_index_load_ms", embLoadTime.Milliseconds(), + ) + if embCount > 50000 { + log.Warn("large embedding index — consider ANN index for better performance", + "count", embCount, "load_ms", embLoadTime.Milliseconds()) + } + + // Create a root context for all agents + rootCtx, rootCancel := context.WithCancel(context.Background()) + defer rootCancel() + + // Instrumented provider wrapper — gives each agent its own usage tracking. + // If training data capture is enabled, wrap with TrainingCaptureProvider too. 
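+ // With capture enabled, the chain built by wrap() below is, outermost
+ // first:
+ //
+ //	TrainingCaptureProvider(caller)
+ //	  -> InstrumentedProvider(caller, modelLabel)
+ //	       -> llmProvider (API or embedded)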
+ modelLabel := cfg.LLM.ChatModel + if cfg.LLM.Provider == "embedded" && cfg.LLM.Embedded.ChatModelFile != "" { + modelLabel = cfg.LLM.Embedded.ChatModelFile + } + wrap := func(caller string) llm.Provider { + var p llm.Provider = llm.NewInstrumentedProvider(llmProvider, memStore, caller, modelLabel) + if cfg.Training.CaptureEnabled && cfg.Training.CaptureDir != "" { + p = llm.NewTrainingCaptureProvider(p, caller, cfg.Training.CaptureDir) + } + return p + } + + // --- Start episoding agent (groups raw events into episodes) --- + var episodingAgent *episoding.EpisodingAgent + if cfg.Episoding.Enabled { + pollingInterval := time.Duration(cfg.Episoding.PollingIntervalSec) * time.Second + if pollingInterval <= 0 { + pollingInterval = 10 * time.Second + } + episodingCfg := episoding.EpisodingConfig{ + EpisodeWindowSizeMin: cfg.Episoding.EpisodeWindowSizeMin, + MinEventsPerEpisode: cfg.Episoding.MinEventsPerEpisode, + PollingInterval: pollingInterval, + StartupLookback: cfg.Episoding.StartupLookback, + DefaultSalience: cfg.Episoding.DefaultSalience, + } + episodingAgent = episoding.NewEpisodingAgent(memStore, wrap("episoding"), log, episodingCfg) + if err := episodingAgent.Start(rootCtx, bus); err != nil { + log.Error("failed to start episoding agent", "error", err) + } else { + log.Info("episoding agent started") + } + } + + // --- Start encoding agent --- + var encoder *encoding.EncodingAgent + if cfg.Encoding.Enabled { + encoder = encoding.NewEncodingAgentWithConfig(memStore, wrap("encoding"), log, buildEncodingConfig(cfg)) + if err := encoder.Start(rootCtx, bus); err != nil { + log.Error("failed to start encoding agent", "error", err) + } else { + log.Info("encoding agent started") + } + } + + // --- Build watchers based on config --- + var watchers []watcher.Watcher + var percAgent *perception.PerceptionAgent + + if cfg.Perception.Enabled { + if cfg.Perception.Filesystem.Enabled { + // Auto-detect noisy app directories and merge with configured exclusions + autoExclusions := fswatcher.DetectNoisyApps(log) + allExclusions := cfg.Perception.Filesystem.ExcludePatterns + for _, pattern := range autoExclusions { + if !fswatcher.MatchesExcludePattern(pattern, allExclusions) { + allExclusions = append(allExclusions, pattern) + } + } + + fsw, err := fswatcher.NewFilesystemWatcher(fswatcher.Config{ + WatchDirs: cfg.Perception.Filesystem.WatchDirs, + ExcludePatterns: allExclusions, + SensitivePatterns: cfg.Perception.Filesystem.SensitivePatterns, + MaxContentBytes: cfg.Perception.Filesystem.MaxContentBytes, + MaxWatches: cfg.Perception.Filesystem.MaxWatches, + ShallowDepth: cfg.Perception.Filesystem.ShallowDepth, + PollIntervalSec: cfg.Perception.Filesystem.PollIntervalSec, + PromotionThreshold: cfg.Perception.Filesystem.PromotionThreshold, + DemotionTimeoutMin: cfg.Perception.Filesystem.DemotionTimeoutMin, + }, log) + if err != nil { + log.Error("failed to create filesystem watcher", "error", err) + } else { + watchers = append(watchers, fsw) + log.Info("filesystem watcher configured", "dirs", cfg.Perception.Filesystem.WatchDirs) + } + } + + if cfg.Perception.Terminal.Enabled { + tw, err := termwatcher.NewTerminalWatcher(termwatcher.Config{ + Shell: cfg.Perception.Terminal.Shell, + PollIntervalSec: cfg.Perception.Terminal.PollIntervalSec, + ExcludePatterns: cfg.Perception.Terminal.ExcludePatterns, + }, log) + if err != nil { + log.Error("failed to create terminal watcher", "error", err) + } else { + watchers = append(watchers, tw) + log.Info("terminal watcher configured", "shell", 
cfg.Perception.Terminal.Shell) + } + } + + if cfg.Perception.Clipboard.Enabled { + cw, err := clipwatcher.NewClipboardWatcher(clipwatcher.Config{ + PollIntervalSec: cfg.Perception.Clipboard.PollIntervalSec, + MaxContentBytes: cfg.Perception.Clipboard.MaxContentBytes, + }, log) + if err != nil { + log.Error("failed to create clipboard watcher", "error", err) + } else { + watchers = append(watchers, cw) + log.Info("clipboard watcher configured") + } + } + + if cfg.Perception.Git.Enabled { + gw, err := gitwatcher.NewGitWatcher(gitwatcher.Config{ + WatchDirs: cfg.Perception.Filesystem.WatchDirs, + PollIntervalSec: cfg.Perception.Git.PollIntervalSec, + MaxRepoDepth: cfg.Perception.Git.MaxRepoDepth, + }, log) + if err != nil { + log.Warn("git watcher not available", "error", err) + } else { + watchers = append(watchers, gw) + log.Info("git watcher configured") + } + } + + // --- Start perception agent --- + if len(watchers) > 0 { + percAgent = perception.NewPerceptionAgent( + watchers, + memStore, + wrap("perception"), + perception.PerceptionConfig{ + HeuristicConfig: perception.HeuristicConfig{ + MinContentLength: cfg.Perception.Heuristics.MinContentLength, + MaxContentLength: cfg.Perception.Heuristics.MaxContentLength, + FrequencyThreshold: cfg.Perception.Heuristics.FrequencyThreshold, + FrequencyWindowMin: cfg.Perception.Heuristics.FrequencyWindowMin, + PassScore: float32(cfg.Perception.HeuristicPassScore), + BatchEditWindowSec: cfg.Perception.BatchEditWindowSec, + BatchEditThreshold: cfg.Perception.BatchEditThreshold, + RecallBoostMax: float32(cfg.Perception.RecallBoostMax), + RecallBoostMinutes: cfg.Perception.RecallBoostWindowMin, + ExtraIgnoredPatterns: cfg.Perception.Heuristics.ExtraIgnoredPatterns, + ExtraLockfileNames: cfg.Perception.Heuristics.ExtraLockfileNames, + ExtraAppInternalDirs: cfg.Perception.Heuristics.ExtraAppInternalDirs, + ExtraSensitiveNames: cfg.Perception.Heuristics.ExtraSensitiveNames, + ExtraSourceExtensions: cfg.Perception.Heuristics.ExtraSourceExtensions, + ExtraTrivialCommands: cfg.Perception.Heuristics.ExtraTrivialCommands, + ExtraHighSignalCommands: cfg.Perception.Heuristics.ExtraHighSignalCommands, + ExtraCodeIndicators: cfg.Perception.Heuristics.ExtraCodeIndicators, + ExtraHighSignalKeywords: cfg.Perception.Heuristics.ExtraHighSignalKeywords, + ExtraMediumKeywords: cfg.Perception.Heuristics.ExtraMediumKeywords, + ExtraLowKeywords: cfg.Perception.Heuristics.ExtraLowKeywords, + Scoring: perception.ScoringConfig{ + BaseFilesystem: cfg.Perception.Scoring.BaseFilesystem, + BaseTerminal: cfg.Perception.Scoring.BaseTerminal, + BaseClipboard: cfg.Perception.Scoring.BaseClipboard, + BaseMCP: cfg.Perception.Scoring.BaseMCP, + BoostErrorLog: cfg.Perception.Scoring.BoostErrorLog, + BoostConfig: cfg.Perception.Scoring.BoostConfig, + BoostSourceCode: cfg.Perception.Scoring.BoostSourceCode, + BoostCommand: cfg.Perception.Scoring.BoostCommand, + BoostCodeSnippet: cfg.Perception.Scoring.BoostCodeSnippet, + KeywordHigh: cfg.Perception.Scoring.KeywordHigh, + KeywordMedium: cfg.Perception.Scoring.KeywordMedium, + KeywordLow: cfg.Perception.Scoring.KeywordLow, + }, + }, + LLMGatingEnabled: cfg.Perception.LLMGatingEnabled, + LearnedExclusionsPath: cfg.Perception.LearnedExclusionsPath, + ProjectResolver: projectResolver, + ContentDedupTTLSec: cfg.Perception.ContentDedupTTLSec, + GitOpCooldownSec: cfg.Perception.GitOpCooldownSec, + MaxRawContentLen: cfg.Perception.MaxRawContentLen, + LLMGateSnippetLen: cfg.Perception.LLMGateSnippetLen, + LLMGateTimeoutSec: 
cfg.Perception.LLMGateTimeoutSec, + RejectionThreshold: cfg.Perception.RejectionThreshold, + RejectionWindowMin: cfg.Perception.RejectionWindowMin, + RejectionMaxPromoted: cfg.Perception.RejectionMaxPromoted, + }, + log, + ) + if err := percAgent.Start(rootCtx, bus); err != nil { + log.Error("failed to start perception agent", "error", err) + } else { + log.Info("perception agent started", "watchers", len(watchers)) + } + } + } + + // --- Create retrieval agent for API queries --- + retriever := retrieval.NewRetrievalAgent(memStore, wrap("retrieval"), buildRetrievalConfig(cfg), log, bus) + + // --- Start consolidation agent --- + var consolidator *consolidation.ConsolidationAgent + if cfg.Consolidation.Enabled { + consolidator = consolidation.NewConsolidationAgent(memStore, wrap("consolidation"), toConsolidationConfig(cfg), log) + + if err := consolidator.Start(rootCtx, bus); err != nil { + log.Error("failed to start consolidation agent", "error", err) + } else { + log.Info("consolidation agent started", "interval", cfg.Consolidation.Interval) + } + } + + // --- Start metacognition agent --- + var metaAgent *metacognition.MetacognitionAgent + if cfg.Metacognition.Enabled { + metaAgent = metacognition.NewMetacognitionAgent(memStore, wrap("metacognition"), metacognition.MetacognitionConfig{ + Interval: cfg.Metacognition.Interval, + StartupDelay: time.Duration(cfg.Metacognition.StartupDelaySec) * time.Second, + ReflectionLookback: cfg.Metacognition.ReflectionLookback, + DeadMemoryWindow: cfg.Metacognition.DeadMemoryWindow, + }, log) + + if err := metaAgent.Start(rootCtx, bus); err != nil { + log.Error("failed to start metacognition agent", "error", err) + } else { + log.Info("metacognition agent started", "interval", cfg.Metacognition.Interval) + } + } + + // --- Start dreaming agent --- + var dreamer *dreaming.DreamingAgent + if cfg.Dreaming.Enabled { + dreamer = dreaming.NewDreamingAgent(memStore, wrap("dreaming"), dreaming.DreamingConfig{ + Interval: cfg.Dreaming.Interval, + BatchSize: cfg.Dreaming.BatchSize, + SalienceThreshold: cfg.Dreaming.SalienceThreshold, + AssociationBoostFactor: cfg.Dreaming.AssociationBoostFactor, + NoisePruneThreshold: cfg.Dreaming.NoisePruneThreshold, + StartupDelay: time.Duration(cfg.Dreaming.StartupDelaySec) * time.Second, + DeadMemoryWindow: cfg.Dreaming.DeadMemoryWindow, + InsightsBudget: cfg.Dreaming.InsightsBudget, + DefaultConfidence: cfg.Dreaming.DefaultConfidence, + }, log) + + if err := dreamer.Start(rootCtx, bus); err != nil { + log.Error("failed to start dreaming agent", "error", err) + } else { + log.Info("dreaming agent started", "interval", cfg.Dreaming.Interval) + } + } + + // --- Start abstraction agent --- + var abstractionAgent *abstraction.AbstractionAgent + if cfg.Abstraction.Enabled { + abstractionAgent = abstraction.NewAbstractionAgent(memStore, wrap("abstraction"), abstraction.AbstractionConfig{ + Interval: cfg.Abstraction.Interval, + MinStrength: cfg.Abstraction.MinStrength, + MaxLLMCalls: cfg.Abstraction.MaxLLMCalls, + StartupDelay: time.Duration(cfg.Abstraction.StartupDelaySec) * time.Second, + DefaultConfidence: cfg.Abstraction.DefaultConfidence, + PatternAxiomConfidence: cfg.Abstraction.PatternAxiomConfidence, + ConfidenceModerateDecay: cfg.Abstraction.ConfidenceModerateDecay, + ConfidenceSignificantDecay: cfg.Abstraction.ConfidenceSignificantDecay, + ConfidenceSevereDecay: cfg.Abstraction.ConfidenceSevereDecay, + GroundingFloor: cfg.Abstraction.GroundingFloor, + }, log) + + if err := abstractionAgent.Start(rootCtx, bus); err != nil 
{ + log.Error("failed to start abstraction agent", "error", err) + } else { + log.Info("abstraction agent started", "interval", cfg.Abstraction.Interval) + } + } + + // --- Start orchestrator (autonomous health monitoring and self-testing) --- + var orch *orchestrator.Orchestrator + if cfg.Orchestrator.Enabled { + orch = orchestrator.NewOrchestrator(memStore, wrap("orchestrator"), orchestrator.OrchestratorConfig{ + AdaptiveIntervals: cfg.Orchestrator.AdaptiveIntervals, + MaxDBSizeMB: cfg.Orchestrator.MaxDBSizeMB, + SelfTestInterval: cfg.Orchestrator.SelfTestInterval, + AutoRecovery: cfg.Orchestrator.AutoRecovery, + HealthReportPath: filepath.Join(filepath.Dir(cfg.Store.DBPath), "health.json"), + MonitorInterval: cfg.Orchestrator.MonitorInterval, + HealthReportInterval: cfg.Orchestrator.HealthReportInterval, + }, log) + + if err := orch.Start(rootCtx, bus); err != nil { + log.Error("failed to start orchestrator", "error", err) + } else { + log.Info("orchestrator started", + "monitor_interval", cfg.Orchestrator.MonitorInterval, + "self_test_interval", cfg.Orchestrator.SelfTestInterval) + } + } + + // --- Start reactor engine (centralized autonomous behavior coordination) --- + { + reactorLog := log.With("component", "reactor") + reactorEngine := reactor.NewEngine(memStore, bus, reactorLog) + + // Parse reactor cooldown overrides from config + var cooldownOverrides map[string]time.Duration + if len(cfg.Reactor.Cooldowns) > 0 { + cooldownOverrides = make(map[string]time.Duration, len(cfg.Reactor.Cooldowns)) + for chainID, durStr := range cfg.Reactor.Cooldowns { + d, err := time.ParseDuration(durStr) + if err != nil { + log.Warn("invalid reactor cooldown duration, ignoring", "chain_id", chainID, "value", durStr, "error", err) + continue + } + cooldownOverrides[chainID] = d + } + } + + deps := reactor.ChainDeps{ + MaxDBSizeMB: cfg.Orchestrator.MaxDBSizeMB, + CooldownOverrides: cooldownOverrides, + Logger: reactorLog, + } + if consolidator != nil { + deps.ConsolidationTrigger = consolidator.GetTriggerChannel() + } + if abstractionAgent != nil { + deps.AbstractionTrigger = abstractionAgent.GetTriggerChannel() + } + if metaAgent != nil { + deps.MetacognitionTrigger = metaAgent.GetTriggerChannel() + } + if dreamer != nil { + deps.DreamingTrigger = dreamer.GetTriggerChannel() + } + if orch != nil { + deps.IncrementAutonomous = orch.IncrementAutonomousCount + } + deps.ForumAgentPosting = cfg.Forum.AgentPosting + deps.ForumMentionResponses = cfg.Forum.MentionResponses + deps.ForumMentionMaxTokens = cfg.Forum.MentionMaxTokens + deps.ForumMentionTemp = cfg.Forum.MentionTemp + deps.ForumPerAgentSubforums = cfg.Forum.PerAgentSubforums + deps.ForumDigestPosting = cfg.Forum.DigestPosting + deps.MentionLLM = llmProvider + if retriever != nil { + deps.MentionQuery = retriever + } + + for _, chain := range reactor.NewChainRegistry(deps) { + reactorEngine.RegisterChain(chain) + } + + if err := reactorEngine.Start(rootCtx, bus); err != nil { + log.Error("failed to start reactor engine", "error", err) + } + } + + // --- Sync project forum categories --- + if n, err := memStore.SyncProjectCategories(rootCtx); err != nil { + log.Warn("failed to sync project categories", "error", err) + } else if n > 0 { + log.Info("created forum categories for projects", "count", n) + } + + // --- Backfill episode-memory links (fixes encoding/episoding race condition) --- + go func() { + if n, err := memStore.BackfillEpisodeMemoryLinks(rootCtx); err != nil { + log.Warn("failed to backfill episode memory links", "error", err) + } 
else if n > 0 { + log.Info("backfilled episode-memory links", "linked", n) + } + }() + + // --- Start API server --- + if cfg.API.Port > 0 { + apiDeps := api.ServerDeps{ + Store: memStore, + LLM: llmProvider, + Bus: bus, + Retriever: retriever, + IngestExcludePatterns: cfg.Perception.Filesystem.ExcludePatterns, + IngestMaxContentBytes: cfg.Perception.Filesystem.MaxContentBytes, + Version: Version, + ConfigPath: configPath, + ServiceRestarter: daemon.NewServiceManager(), + PIDRestart: daemon.PIDRestart, + MCPToolCount: mcp.ToolCount(), + StartTime: time.Now(), + Log: log, + } + // Only set Consolidator if it's non-nil (avoids Go nil-interface trap) + if consolidator != nil { + apiDeps.Consolidator = consolidator + } + if cfg.AgentSDK.Enabled && cfg.AgentSDK.EvolutionDir != "" { + apiDeps.AgentEvolutionDir = cfg.AgentSDK.EvolutionDir + apiDeps.AgentWebPort = cfg.AgentSDK.WebPort + } + + // Set API routes memory defaults from config + routes.FeedbackStrengthDelta = cfg.MemoryDefaults.FeedbackStrengthDelta + routes.FeedbackSalienceBoost = cfg.MemoryDefaults.FeedbackSalienceBoost + routes.InitialSalienceForType = func(memType string) float32 { + return cfg.MemoryDefaults.SalienceForType(memType) + } + + apiServer := api.NewServer(api.ServerConfig{ + Host: cfg.API.Host, + Port: cfg.API.Port, + RequestTimeoutSec: cfg.API.RequestTimeoutSec, + Token: cfg.API.Token, + AllowedOrigins: cfg.API.AllowedOrigins, + }, apiDeps) + + if err := apiServer.Start(); err != nil { + log.Error("failed to start API server", "error", err) + } else { + log.Info("API server started", "addr", fmt.Sprintf("%s:%d", cfg.API.Host, cfg.API.Port)) + defer func() { + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer shutdownCancel() + _ = apiServer.Stop(shutdownCtx) + }() + } + } + + // --- Start agent web server (Python WebSocket) --- + agentWebCmd, agentWebDone := startAgentWebServer(cfg, log) + + // Set up signal handling for graceful shutdown + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, shutdownSignals()...) + + // Block until signal received + sig := <-sigChan + log.Info("shutdown signal received", "signal", sig.String()) + + // Graceful shutdown: cancel root context to stop all agents + rootCancel() + + // Stop agent web server if running. Use agentWebDone (owned by the + // background goroutine) instead of calling cmd.Wait() a second time. + if agentWebCmd != nil && agentWebCmd.Process != nil { + log.Info("stopping agent web server", "pid", agentWebCmd.Process.Pid) + // On Unix, send SIGTERM for graceful shutdown. On Windows, SIGTERM + // is not supported — go straight to Kill(). 
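+		// Either way, the select below waits up to 5s on agentWebDone
+		// before falling back to a hard Kill().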
+ if runtime.GOOS != "windows" { + if err := agentWebCmd.Process.Signal(syscall.SIGTERM); err != nil { + log.Warn("failed to send SIGTERM to agent web server", "error", err) + _ = agentWebCmd.Process.Kill() + } + } else { + _ = agentWebCmd.Process.Kill() + } + select { + case <-agentWebDone: + case <-time.After(5 * time.Second): + log.Warn("agent web server did not exit in 5s, killing") + _ = agentWebCmd.Process.Kill() + } + } + + // Give agents a moment to drain + time.Sleep(500 * time.Millisecond) + + if orch != nil { + _ = orch.Stop() + } + if abstractionAgent != nil { + _ = abstractionAgent.Stop() + } + if dreamer != nil { + _ = dreamer.Stop() + } + if metaAgent != nil { + _ = metaAgent.Stop() + } + if consolidator != nil { + _ = consolidator.Stop() + } + if encoder != nil { + _ = encoder.Stop() + } + if episodingAgent != nil { + _ = episodingAgent.Stop() + } + if percAgent != nil { + _ = percAgent.Stop() + } + + if err := bus.Close(); err != nil { + log.Error("error closing event bus", "error", err) + } + + if err := memStore.Close(); err != nil { + log.Error("error closing store", "error", err) + } + + log.Info("mnemonic daemon shutdown complete") +} diff --git a/cmd/mnemonic/status.go b/cmd/mnemonic/status.go new file mode 100644 index 00000000..aba12e6e --- /dev/null +++ b/cmd/mnemonic/status.go @@ -0,0 +1,251 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "time" + + "github.com/appsprout-dev/mnemonic/internal/config" + "github.com/appsprout-dev/mnemonic/internal/daemon" + "github.com/appsprout-dev/mnemonic/internal/store/sqlite" +) + +// statusCommand displays comprehensive system status. +func statusCommand(configPath string) { + svc := daemon.NewServiceManager() + + cfg, err := config.Load(configPath) + if err != nil { + // Even without config, show daemon state + fmt.Printf("%sMnemonic v%s Status%s\n\n", colorBold, Version, colorReset) + if svcRunning, svcPid := svc.IsRunning(); svcRunning { + fmt.Printf(" Daemon: %srunning%s (%s, PID %d)\n", colorGreen, colorReset, svc.ServiceName(), svcPid) + } else if running, pid := daemon.IsRunning(); running { + fmt.Printf(" Daemon: %srunning%s (PID %d)\n", colorGreen, colorReset, pid) + } else { + fmt.Printf(" Daemon: %sstopped%s\n", colorRed, colorReset) + } + fmt.Fprintf(os.Stderr, " (Config error: %v)\n", err) + return + } + + fmt.Printf("%sMnemonic v%s Status%s\n\n", colorBold, Version, colorReset) + + // Daemon state — check platform service first, then PID file + running := false + pid := 0 + mode := "" + if svcRunning, svcPid := svc.IsRunning(); svcRunning { + running, pid, mode = true, svcPid, fmt.Sprintf(" (%s)", svc.ServiceName()) + } else if pidRunning, pidPid := daemon.IsRunning(); pidRunning { + running, pid = true, pidPid + } + if running { + fmt.Printf(" Daemon: %srunning%s%s (PID %d)\n", colorGreen, colorReset, mode, pid) + } else { + fmt.Printf(" Daemon: %sstopped%s\n", colorRed, colorReset) + } + + // Try to get live status from the API + apiBase := fmt.Sprintf("http://%s:%d/api/v1", cfg.API.Host, cfg.API.Port) + apiReachable := false + + // Health check + healthResp, err := apiGet(apiBase+"/health", cfg.API.Token) + if err == nil { + defer func() { _ = healthResp.Body.Close() }() + if healthResp.StatusCode == 200 { + apiReachable = true + var health map[string]interface{} + if json.NewDecoder(healthResp.Body).Decode(&health) == nil { + llmStatus, _ := health["llm"].(string) + storeStatus, _ := health["store"].(string) + + llmColor := colorGreen + if llmStatus != "ok" { + llmColor = colorRed 
+ } + storeColor := colorGreen + if storeStatus != "ok" { + storeColor = colorRed + } + + fmt.Printf(" API: %slistening%s on %s:%d\n", colorGreen, colorReset, cfg.API.Host, cfg.API.Port) + fmt.Printf(" LLM: %s%s%s (%s)\n", llmColor, llmStatus, colorReset, cfg.LLM.ChatModel) + fmt.Printf(" Store: %s%s%s\n", storeColor, storeStatus, colorReset) + } + } + } + + if !apiReachable { + fmt.Printf(" API: %sunreachable%s\n", colorRed, colorReset) + } + + // Memory stats — from API if available, else direct DB + fmt.Printf("\n %sMemory Store%s\n", colorBold, colorReset) + + if apiReachable { + statsResp, err := apiGet(apiBase+"/stats", cfg.API.Token) + if err == nil { + defer func() { _ = statsResp.Body.Close() }() + var data map[string]interface{} + if json.NewDecoder(statsResp.Body).Decode(&data) == nil { + s, _ := data["store"].(map[string]interface{}) + if s == nil { + s = data + } + total := intVal(s, "total_memories") + active := intVal(s, "active_memories") + fading := intVal(s, "fading_memories") + archived := intVal(s, "archived_memories") + merged := intVal(s, "merged_memories") + assoc := intVal(s, "total_associations") + dbSize := intVal(s, "storage_size_bytes") + + fmt.Printf(" Total: %d\n", total) + fmt.Printf(" Active: %s%d%s\n", colorGreen, active, colorReset) + fmt.Printf(" Fading: %s%d%s\n", colorYellow, fading, colorReset) + fmt.Printf(" Archived: %s%d%s\n", colorGray, archived, colorReset) + fmt.Printf(" Merged: %d\n", merged) + fmt.Printf(" Associations: %d\n", assoc) + fmt.Printf(" DB size: %.1f KB\n", float64(dbSize)/1024) + } + } + } else { + // Fall back to direct DB access + db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) + if err == nil { + defer func() { _ = db.Close() }() + ctx := context.Background() + stats, err := db.GetStatistics(ctx) + if err == nil { + fmt.Printf(" Total: %d\n", stats.TotalMemories) + fmt.Printf(" Active: %s%d%s\n", colorGreen, stats.ActiveMemories, colorReset) + fmt.Printf(" Fading: %s%d%s\n", colorYellow, stats.FadingMemories, colorReset) + fmt.Printf(" Archived: %s%d%s\n", colorGray, stats.ArchivedMemories, colorReset) + fmt.Printf(" Merged: %d\n", stats.MergedMemories) + fmt.Printf(" Associations: %d\n", stats.TotalAssociations) + fmt.Printf(" DB size: %.1f KB\n", float64(stats.StorageSizeBytes)/1024) + } + } + } + + // Encoding queue depth — direct DB query + fmt.Printf("\n %sEncoding Queue%s\n", colorBold, colorReset) + { + db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) + if err == nil { + defer func() { _ = db.Close() }() + ctx := context.Background() + var unprocessed int + row := db.DB().QueryRowContext(ctx, "SELECT COUNT(*) FROM raw_memories WHERE processed = 0") + if row.Scan(&unprocessed) == nil { + queueColor := colorGreen + queueNote := "" + if unprocessed > 500 { + queueColor = colorRed + queueNote = " (LLM may be down — run 'mnemonic diagnose')" + } else if unprocessed > 100 { + queueColor = colorYellow + queueNote = " (processing)" + } + fmt.Printf(" Unprocessed: %s%d%s%s\n", queueColor, unprocessed, colorReset, queueNote) + } + } + } + + // Consolidation status — check last consolidation from DB + fmt.Printf("\n %sConsolidation%s\n", colorBold, colorReset) + if cfg.Consolidation.Enabled { + fmt.Printf(" Enabled: yes (every %s)\n", cfg.Consolidation.IntervalRaw) + db, err := sqlite.NewSQLiteStore(cfg.Store.DBPath, cfg.Store.BusyTimeoutMs) + if err == nil { + defer func() { _ = db.Close() }() + lastConsolidation := getLastConsolidation(db) + if lastConsolidation != "" { + 
fmt.Printf(" Last run: %s\n", lastConsolidation) + } else { + fmt.Printf(" Last run: %snever%s\n", colorGray, colorReset) + } + } + } else { + fmt.Printf(" Enabled: no\n") + } + + // Perception config + fmt.Printf("\n %sPerception%s\n", colorBold, colorReset) + if cfg.Perception.Enabled { + if cfg.Perception.Filesystem.Enabled { + fmt.Printf(" Filesystem: %senabled%s (%d dirs)\n", colorGreen, colorReset, len(cfg.Perception.Filesystem.WatchDirs)) + } else { + fmt.Printf(" Filesystem: %sdisabled%s\n", colorGray, colorReset) + } + if cfg.Perception.Terminal.Enabled { + fmt.Printf(" Terminal: %senabled%s (poll %ds)\n", colorGreen, colorReset, cfg.Perception.Terminal.PollIntervalSec) + } else { + fmt.Printf(" Terminal: %sdisabled%s\n", colorGray, colorReset) + } + if cfg.Perception.Clipboard.Enabled { + fmt.Printf(" Clipboard: %senabled%s\n", colorGreen, colorReset) + } else { + fmt.Printf(" Clipboard: %sdisabled%s\n", colorGray, colorReset) + } + } else { + fmt.Printf(" All perception: %sdisabled%s\n", colorGray, colorReset) + } + + // Paths + fmt.Printf("\n %sPaths%s\n", colorBold, colorReset) + fmt.Printf(" Config: %s\n", configPath) + fmt.Printf(" Database: %s\n", cfg.Store.DBPath) + fmt.Printf(" Log: %s\n", daemon.LogPath()) + fmt.Printf(" PID: %s\n", daemon.PIDFilePath()) + fmt.Printf(" Dashboard: http://%s:%d\n", cfg.API.Host, cfg.API.Port) + fmt.Println() +} + +// intVal safely extracts an int from a JSON map. +func intVal(m map[string]interface{}, key string) int { + if v, ok := m[key]; ok { + switch n := v.(type) { + case float64: + return int(n) + case int: + return n + } + } + return 0 +} + +// getLastConsolidation queries for the last consolidation timestamp. +func getLastConsolidation(db *sqlite.SQLiteStore) string { + ctx := context.Background() + record, err := db.GetLastConsolidation(ctx) + if err != nil { + return "" + } + if record.ID == "" { + return "" + } + ago := time.Since(record.EndTime).Round(time.Minute) + return fmt.Sprintf("%s (%s ago, %d memories, %dms)", record.EndTime.Format("Jan 2 15:04"), formatDuration(ago), record.MemoriesProcessed, record.DurationMs) +} + +// formatDuration formats a duration as human-readable. 
+func formatDuration(d time.Duration) string { + if d < time.Minute { + return "just now" + } + if d < time.Hour { + mins := int(d.Minutes()) + return fmt.Sprintf("%dm", mins) + } + if d < 24*time.Hour { + hours := int(d.Hours()) + return fmt.Sprintf("%dh", hours) + } + days := int(d.Hours() / 24) + return fmt.Sprintf("%dd", days) +} diff --git a/cmd/mnemonic/update.go b/cmd/mnemonic/update.go new file mode 100644 index 00000000..7e8cfbcf --- /dev/null +++ b/cmd/mnemonic/update.go @@ -0,0 +1,90 @@ +package main + +import ( + "context" + "crypto/rand" + "encoding/hex" + "fmt" + "os" + "time" + + "github.com/appsprout-dev/mnemonic/internal/daemon" + "github.com/appsprout-dev/mnemonic/internal/updater" +) + +func checkUpdateCommand() { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + fmt.Printf("Checking for updates...\n") + info, err := updater.CheckForUpdate(ctx, Version) + if err != nil { + die(exitNetwork, "Update check failed", err.Error()) + } + + if info.UpdateAvailable { + fmt.Printf("\n Current: v%s\n", info.CurrentVersion) + fmt.Printf(" Latest: %sv%s%s\n\n", colorGreen, info.LatestVersion, colorReset) + fmt.Printf(" Run %smnemonic update%s to install.\n", colorBold, colorReset) + fmt.Printf(" Release: %s\n", info.ReleaseURL) + } else { + fmt.Printf("\n %sYou're up to date!%s (v%s)\n", colorGreen, colorReset, info.CurrentVersion) + } +} + +func updateCommand() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + fmt.Printf("Checking for updates...\n") + info, err := updater.CheckForUpdate(ctx, Version) + if err != nil { + die(exitNetwork, "Update check failed", err.Error()) + } + + if !info.UpdateAvailable { + fmt.Printf("%sAlready up to date%s (v%s)\n", colorGreen, colorReset, info.CurrentVersion) + return + } + + fmt.Printf("Downloading v%s...\n", info.LatestVersion) + result, err := updater.PerformUpdate(ctx, info) + if err != nil { + die(exitGeneral, "Update failed", err.Error()) + } + + fmt.Printf("%sUpdated: v%s → v%s%s\n", colorGreen, result.PreviousVersion, result.NewVersion, colorReset) + + // Restart daemon if it's running + svc := daemon.NewServiceManager() + if svc.IsInstalled() { + running, _ := svc.IsRunning() + if running { + fmt.Printf("Restarting daemon...\n") + if err := svc.Stop(); err != nil { + fmt.Fprintf(os.Stderr, "%sWarning:%s failed to stop daemon: %v\n", colorYellow, colorReset, err) + fmt.Printf("Restart manually: mnemonic restart\n") + return + } + time.Sleep(1 * time.Second) + if err := svc.Start(); err != nil { + fmt.Fprintf(os.Stderr, "%sWarning:%s failed to start daemon: %v\n", colorYellow, colorReset, err) + fmt.Printf("Start manually: mnemonic start\n") + return + } + fmt.Printf("%sDaemon restarted with v%s%s\n", colorGreen, result.NewVersion, colorReset) + } + } +} + +func generateTokenCommand() { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + fmt.Fprintf(os.Stderr, "Error generating token: %v\n", err) + os.Exit(1) + } + token := hex.EncodeToString(b) + fmt.Printf("Generated API token:\n\n %s\n\n", token) + fmt.Printf("Add this to your config.yaml:\n\n api:\n token: \"%s\"\n\n", token) + fmt.Printf("Then set this environment variable for CLI tools:\n\n export MNEMONIC_API_TOKEN=\"%s\"\n", token) +} diff --git a/cmd/mnemonic/util.go b/cmd/mnemonic/util.go new file mode 100644 index 00000000..bacd6969 --- /dev/null +++ b/cmd/mnemonic/util.go @@ -0,0 +1,9 @@ +package main + +// truncate shortens a string to maxLen with ellipsis. 
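+// Note: it slices bytes, not runes, so a multi-byte character at the cut can
+// be split; maxLen values of 3 or less yield a bare prefix with no ellipsis.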
+func truncate(s string, maxLen int) string {
+	if len(s) <= maxLen {
+		return s
+	}
+	if maxLen <= 3 {
+		return s[:max(maxLen, 0)]
+	}
+	return s[:maxLen-3] + "..."
+}
diff --git a/cmd/mnemonic/watch.go b/cmd/mnemonic/watch.go
new file mode 100644
index 00000000..f8437a0d
--- /dev/null
+++ b/cmd/mnemonic/watch.go
@@ -0,0 +1,134 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/signal"
+	"time"
+
+	"github.com/appsprout-dev/mnemonic/internal/config"
+	"github.com/gorilla/websocket"
+)
+
+// watchCommand connects to the daemon's WebSocket and streams live events.
+func watchCommand(configPath string) {
+	cfg, err := config.Load(configPath)
+	if err != nil {
+		die(exitConfig, fmt.Sprintf("loading config: %v", err), "mnemonic diagnose")
+	}
+
+	wsURL := fmt.Sprintf("ws://%s:%d/ws", cfg.API.Host, cfg.API.Port)
+
+	fmt.Printf("%sMnemonic Live Events%s — connecting to %s\n", colorBold, colorReset, wsURL)
+	fmt.Printf("Press Ctrl+C to stop.\n\n")
+
+	// Connect to WebSocket
+	conn, _, err := websocket.DefaultDialer.Dial(wsURL, nil)
+	if err != nil {
+		die(exitNetwork, fmt.Sprintf("connecting to daemon: %v", err), "mnemonic start")
+	}
+	defer func() { _ = conn.Close() }()
+
+	// Handle Ctrl+C
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, shutdownSignals()...)
+
+	go func() {
+		<-sigChan
+		fmt.Printf("\n%sStopping event watch.%s\n", colorGray, colorReset)
+		_ = conn.Close()
+		os.Exit(0)
+	}()
+
+	// Read and display events
+	for {
+		_, message, err := conn.ReadMessage()
+		if err != nil {
+			if websocket.IsCloseError(err, websocket.CloseNormalClosure) {
+				fmt.Println("Connection closed.")
+			} else {
+				fmt.Fprintf(os.Stderr, "\nWebSocket disconnected: %v\n", err)
+			}
+			return
+		}
+
+		formatWatchEvent(message)
+	}
+}
+
+// formatWatchEvent formats and prints a WebSocket event with colors.
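+// Payloads that fail JSON decoding, and event types without a dedicated case,
+// fall through to a timestamped raw-text line.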
+func formatWatchEvent(data []byte) { + var evt map[string]interface{} + if err := json.Unmarshal(data, &evt); err != nil { + // Raw text event + ts := time.Now().Format("15:04:05") + fmt.Printf("%s%s%s %s\n", colorGray, ts, colorReset, string(data)) + return + } + + eventType, _ := evt["type"].(string) + ts := time.Now().Format("15:04:05") + + switch eventType { + case "raw_memory_created": + source, _ := evt["source"].(string) + id, _ := evt["id"].(string) + shortID := truncID(id) + fmt.Printf("%s%s%s %s▶ PERCEIVED%s [%s] %s\n", + colorGray, ts, colorReset, colorCyan, colorReset, source, shortID) + + case "memory_encoded": + id, _ := evt["id"].(string) + shortID := truncID(id) + fmt.Printf("%s%s%s %s▶ ENCODED%s %s\n", + colorGray, ts, colorReset, colorGreen, colorReset, shortID) + + case "consolidation_completed": + processed, _ := evt["memories_processed"].(float64) + decayed, _ := evt["memories_decayed"].(float64) + merged, _ := evt["merged_clusters"].(float64) + pruned, _ := evt["associations_pruned"].(float64) + durationMs, _ := evt["duration_ms"].(float64) + fmt.Printf("%s%s%s %s▶ CONSOLIDATED%s processed=%d decayed=%d merged=%d pruned=%d (%dms)\n", + colorGray, ts, colorReset, colorYellow, colorReset, + int(processed), int(decayed), int(merged), int(pruned), int(durationMs)) + + case "query_executed": + query, _ := evt["query"].(string) + results, _ := evt["result_count"].(float64) + took, _ := evt["took_ms"].(float64) + fmt.Printf("%s%s%s %s▶ QUERY%s \"%s\" → %d results (%dms)\n", + colorGray, ts, colorReset, colorBlue, colorReset, + query, int(results), int(took)) + + case "dream_cycle_completed": + replayed, _ := evt["memories_replayed"].(float64) + strengthened, _ := evt["associations_strengthened"].(float64) + newAssoc, _ := evt["new_associations_created"].(float64) + demoted, _ := evt["noisy_memories_demoted"].(float64) + durationMs, _ := evt["duration_ms"].(float64) + fmt.Printf("%s%s%s %s▶ DREAMED%s replayed=%d strengthened=%d new_assoc=%d demoted=%d (%dms)\n", + colorGray, ts, colorReset, colorCyan, colorReset, + int(replayed), int(strengthened), int(newAssoc), int(demoted), int(durationMs)) + + case "meta_cycle_completed": + observations, _ := evt["observations_logged"].(float64) + fmt.Printf("%s%s%s %s▶ META%s observations=%d\n", + colorGray, ts, colorReset, colorCyan, colorReset, int(observations)) + + default: + // Generic event + fmt.Printf("%s%s%s %s▶ %s%s %s\n", + colorGray, ts, colorReset, colorGray, eventType, colorReset, + string(data)) + } +} + +// truncID shortens a UUID for display. +func truncID(id string) string { + if len(id) > 8 { + return id[:8] + } + return id +} diff --git a/cmd/mnemonic/web.go b/cmd/mnemonic/web.go new file mode 100644 index 00000000..e391a376 --- /dev/null +++ b/cmd/mnemonic/web.go @@ -0,0 +1,124 @@ +package main + +import ( + "bytes" + "fmt" + "log/slog" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/appsprout-dev/mnemonic/internal/config" +) + +// startAgentWebServer starts the Python WebSocket agent server as a child process. +// Returns the started Cmd and a channel that receives the Wait() result when the +// process exits. The caller must use the channel instead of calling cmd.Wait() +// directly, since the background monitor goroutine owns the single Wait() call. +// Returns (nil, nil) if disabled or failed to start. 
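+// Child stderr is buffered rather than streamed so a fast failure (e.g. a
+// missing Python dependency) can be summarized as a single warning.
+//
+// Typical caller shape, mirroring the shutdown path in main.go:
+//
+//	cmd, done := startAgentWebServer(cfg, log)
+//	// ... on shutdown:
+//	if cmd != nil && cmd.Process != nil {
+//		_ = cmd.Process.Signal(syscall.SIGTERM)
+//		<-done // result of the single owned cmd.Wait()
+//	}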
+func startAgentWebServer(cfg *config.Config, log *slog.Logger) (*exec.Cmd, <-chan error) {
+	if !cfg.AgentSDK.Enabled || cfg.AgentSDK.EvolutionDir == "" {
+		return nil, nil
+	}
+
+	port := cfg.AgentSDK.WebPort
+	if port == 0 {
+		port = 9998
+	}
+
+	// SDK directory: evolution_dir is sdk/agent/evolution, so sdk/ is two levels up.
+	sdkDir := filepath.Dir(filepath.Dir(cfg.AgentSDK.EvolutionDir))
+
+	// Determine python binary: prefer explicit config, then venv Python (has
+	// all SDK deps installed), then uv, then system python3/python.
+	pythonBin := cfg.AgentSDK.PythonBin
+	if pythonBin == "" {
+		// Venv layout differs by platform: bin/python3 (Unix) vs Scripts/python.exe (Windows)
+		venvPython := filepath.Join(sdkDir, ".venv", "bin", "python3")
+		if runtime.GOOS == "windows" {
+			venvPython = filepath.Join(sdkDir, ".venv", "Scripts", "python.exe")
+		}
+		if _, err := os.Stat(venvPython); err == nil {
+			pythonBin = venvPython
+		} else if uvPath, err := exec.LookPath("uv"); err == nil {
+			pythonBin = uvPath
+		} else if py3, err := exec.LookPath("python3"); err == nil {
+			pythonBin = py3
+		} else if py, err := exec.LookPath("python"); err == nil {
+			// Windows typically has "python" not "python3"
+			pythonBin = py
+		} else {
+			log.Error("cannot find python3 or uv to start agent web server")
+			return nil, nil
+		}
+	}
+
+	// Build command arguments. LookPath may resolve uv as "uv" or "uv.exe",
+	// so compare the base name with any ".exe" suffix stripped.
+	var args []string
+	if strings.TrimSuffix(filepath.Base(pythonBin), ".exe") == "uv" {
+		args = []string{"run", "python", "-m", "agent.web"}
+	} else {
+		args = []string{"-m", "agent.web"}
+	}
+
+	// Resolve mnemonic binary and config paths relative to project root.
+	projectRoot := filepath.Dir(sdkDir)
+	binaryName := "mnemonic"
+	if runtime.GOOS == "windows" {
+		binaryName = "mnemonic.exe"
+	}
+	args = append(args,
+		"--port", fmt.Sprintf("%d", port),
+		"--mnemonic-config", filepath.Join(projectRoot, "config.yaml"),
+		"--mnemonic-binary", filepath.Join(projectRoot, "bin", binaryName),
+	)
+
+	cmd := exec.Command(pythonBin, args...)
+	cmd.Dir = sdkDir
+
+	// Capture stderr so missing-dependency tracebacks don't pollute the console.
+	var stderrBuf bytes.Buffer
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = &stderrBuf
+
+	// Strip CLAUDECODE env var so the bundled Claude CLI doesn't refuse
+	// to start (nested session detection).
+	env := os.Environ()
+	filtered := env[:0]
+	for _, e := range env {
+		if !strings.HasPrefix(e, "CLAUDECODE=") {
+			filtered = append(filtered, e)
+		}
+	}
+	cmd.Env = filtered
+
+	if err := cmd.Start(); err != nil {
+		log.Error("failed to start agent web server", "error", err, "python_bin", pythonBin)
+		return nil, nil
+	}
+
+	log.Info("agent web server started", "pid", cmd.Process.Pid, "port", port, "sdk_dir", sdkDir)
+
+	// Monitor the process in background — if it exits quickly, log a clean warning
+	// instead of dumping a raw Python traceback. This goroutine owns the single
+	// cmd.Wait() call; the done channel lets the shutdown path wait for exit
+	// without calling Wait() a second time (which would race).
+	done := make(chan error, 1)
+	go func() {
+		err := cmd.Wait()
+		if err != nil {
+			stderr := strings.TrimSpace(stderrBuf.String())
+			if strings.Contains(stderr, "ModuleNotFoundError") || strings.Contains(stderr, "No module named") {
+				log.Warn("agent web server exited: missing Python dependency — install SDK requirements to enable",
+					"hint", "cd sdk && pip install -r requirements.txt")
+			} else {
+				log.Warn("agent web server exited unexpectedly", "error", err, "stderr", stderr)
+			}
+		}
+		done <- err
+	}()
+
+	return cmd, done
+}