From e5543e999c0a9818f3d9340946f82bb70aa437dd Mon Sep 17 00:00:00 2001 From: z23cc Date: Tue, 7 Apr 2026 23:28:08 +0800 Subject: [PATCH] =?UTF-8?q?feat(flowctl):=20remove=20libSQL/fastembed=20?= =?UTF-8?q?=E2=80=94=20pure=20file-based=20state=20[fn-17]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the entire async libSQL database layer with sync file-based I/O. All state now stored as JSON/JSONL files in .flow/, readable by MCP tools. Key changes: - Remove fastembed (128MB ONNX model) and all vector/embedding code - Remove libsql, tokio, tokio-util dependencies - Rewrite flowctl-db: 5,800 lines of async SQL → sync file wrappers - Delete db_shim.rs (492-line async-to-sync bridge) - Expand json_store with events JSONL, pipeline/phases/locks JSON, memory JSONL with text search - FlowStore struct as unified entry point for all file operations Results: - Binary: 22MB → 2.7MB (88% reduction) - Dependencies: zero async runtime, zero database - All state visible to MCP tools (Read, Grep, Glob) - 299 tests pass Co-Authored-By: Claude Opus 4.6 (1M context) --- CLAUDE.md | 4 +- flowctl/Cargo.lock | 2351 +---------------- flowctl/Cargo.toml | 14 - flowctl/crates/flowctl-cli/Cargo.toml | 2 - .../flowctl-cli/src/commands/admin/config.rs | 13 +- .../src/commands/admin/exchange.rs | 88 +- .../flowctl-cli/src/commands/admin/init.rs | 44 +- .../flowctl-cli/src/commands/admin/status.rs | 57 +- .../flowctl-cli/src/commands/approval.rs | 92 +- .../flowctl-cli/src/commands/checkpoint.rs | 93 +- .../flowctl-cli/src/commands/db_shim.rs | 492 ---- .../crates/flowctl-cli/src/commands/gap.rs | 239 +- .../flowctl-cli/src/commands/helpers.rs | 58 +- .../crates/flowctl-cli/src/commands/log.rs | 123 +- .../crates/flowctl-cli/src/commands/mod.rs | 1 - .../crates/flowctl-cli/src/commands/query.rs | 101 +- .../flowctl-cli/src/commands/scout_cache.rs | 103 +- .../crates/flowctl-cli/src/commands/skill.rs | 102 +- 
.../crates/flowctl-cli/src/commands/stats.rs | 362 +-- .../src/commands/workflow/lifecycle.rs | 77 +- .../flowctl-cli/src/commands/workflow/mod.rs | 35 - .../src/commands/workflow/phase.rs | 32 +- .../src/commands/workflow/pipeline_phase.rs | 86 +- .../flowctl-cli/tests/export_import_test.rs | 81 +- .../flowctl-cli/tests/integration_test.rs | 2 +- flowctl/crates/flowctl-core/src/changes.rs | 2 +- flowctl/crates/flowctl-core/src/json_store.rs | 464 +++- flowctl/crates/flowctl-db/Cargo.toml | 14 +- .../crates/flowctl-db/benches/event_store.rs | 68 - flowctl/crates/flowctl-db/src/approvals.rs | 52 + flowctl/crates/flowctl-db/src/error.rs | 56 +- flowctl/crates/flowctl-db/src/events.rs | 363 +-- flowctl/crates/flowctl-db/src/gaps.rs | 68 + flowctl/crates/flowctl-db/src/indexer.rs | 521 ---- flowctl/crates/flowctl-db/src/lib.rs | 57 +- flowctl/crates/flowctl-db/src/locks.rs | 122 + flowctl/crates/flowctl-db/src/memory.rs | 595 +---- flowctl/crates/flowctl-db/src/metrics.rs | 514 ---- flowctl/crates/flowctl-db/src/migration.rs | 272 -- flowctl/crates/flowctl-db/src/phases.rs | 70 + flowctl/crates/flowctl-db/src/pipeline.rs | 52 + flowctl/crates/flowctl-db/src/pool.rs | 326 --- flowctl/crates/flowctl-db/src/repo/deps.rs | 124 - flowctl/crates/flowctl-db/src/repo/epic.rs | 214 -- flowctl/crates/flowctl-db/src/repo/event.rs | 118 - .../crates/flowctl-db/src/repo/event_store.rs | 343 --- .../crates/flowctl-db/src/repo/evidence.rs | 99 - .../crates/flowctl-db/src/repo/file_lock.rs | 331 --- .../flowctl-db/src/repo/file_ownership.rs | 66 - flowctl/crates/flowctl-db/src/repo/gap.rs | 158 -- flowctl/crates/flowctl-db/src/repo/helpers.rs | 107 - flowctl/crates/flowctl-db/src/repo/mod.rs | 635 ----- .../flowctl-db/src/repo/phase_progress.rs | 64 - flowctl/crates/flowctl-db/src/repo/runtime.rs | 84 - .../crates/flowctl-db/src/repo/scout_cache.rs | 224 -- flowctl/crates/flowctl-db/src/repo/task.rs | 312 --- flowctl/crates/flowctl-db/src/schema.sql | 313 --- 
flowctl/crates/flowctl-db/src/skill.rs | 273 -- flowctl/crates/flowctl-db/src/store.rs | 104 + flowctl/crates/flowctl-service/Cargo.toml | 4 - .../crates/flowctl-service/src/approvals.rs | 458 +--- flowctl/crates/flowctl-service/src/changes.rs | 100 +- .../crates/flowctl-service/src/connection.rs | 72 - flowctl/crates/flowctl-service/src/lib.rs | 9 +- .../crates/flowctl-service/src/lifecycle.rs | 181 +- flowctl/crates/flowctl-service/src/outputs.rs | 2 +- flowctl/tests/cmd/next_json.toml | 2 +- 67 files changed, 1961 insertions(+), 10704 deletions(-) delete mode 100644 flowctl/crates/flowctl-cli/src/commands/db_shim.rs delete mode 100644 flowctl/crates/flowctl-db/benches/event_store.rs create mode 100644 flowctl/crates/flowctl-db/src/approvals.rs create mode 100644 flowctl/crates/flowctl-db/src/gaps.rs delete mode 100644 flowctl/crates/flowctl-db/src/indexer.rs create mode 100644 flowctl/crates/flowctl-db/src/locks.rs delete mode 100644 flowctl/crates/flowctl-db/src/metrics.rs delete mode 100644 flowctl/crates/flowctl-db/src/migration.rs create mode 100644 flowctl/crates/flowctl-db/src/phases.rs create mode 100644 flowctl/crates/flowctl-db/src/pipeline.rs delete mode 100644 flowctl/crates/flowctl-db/src/pool.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/deps.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/epic.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/event.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/event_store.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/evidence.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/file_lock.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/file_ownership.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/gap.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/helpers.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/mod.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/phase_progress.rs delete mode 100644 
flowctl/crates/flowctl-db/src/repo/runtime.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/scout_cache.rs delete mode 100644 flowctl/crates/flowctl-db/src/repo/task.rs delete mode 100644 flowctl/crates/flowctl-db/src/schema.sql delete mode 100644 flowctl/crates/flowctl-db/src/skill.rs create mode 100644 flowctl/crates/flowctl-db/src/store.rs delete mode 100644 flowctl/crates/flowctl-service/src/connection.rs diff --git a/CLAUDE.md b/CLAUDE.md index 69f24821..81583389 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## What Is This -Flow-Code is a Claude Code plugin for structured, plan-first development. It provides a unified entry point (`/flow-code:run`) plus individual slash commands, skills, and agents that orchestrate task tracking via a `.flow/` directory. Core engine is a Rust binary (`flowctl`) with libSQL storage (async, native vector search) and MCP server support. +Flow-Code is a Claude Code plugin for structured, plan-first development. It provides a unified entry point (`/flow-code:run`) plus individual slash commands, skills, and agents that orchestrate task tracking via a `.flow/` directory. Core engine is a Rust binary (`flowctl`) with file-based JSON storage and MCP server support. ## Core Architecture @@ -52,7 +52,7 @@ bash scripts/ralph_e2e_short_rp_test.sh All tests create temp directories and clean up after themselves. They must NOT be run from the plugin repo root (safety check enforced). -**Storage runtime**: flowctl is libSQL-only (async, native vector search via `F32_BLOB(384)`). The `flowctl-db` crate was rewritten from rusqlite to libsql in fn-19 and is the sole storage crate. First build downloads the fastembed ONNX model (~130MB) to `.fastembed_cache/` for semantic memory search; subsequent builds/tests reuse the cache. +**Storage runtime**: State is stored in JSON/JSONL files in the `.flow/` directory, readable by any tool. 
The `flowctl-db` crate provides synchronous file-based storage with no external database dependencies. ## Code Quality diff --git a/flowctl/Cargo.lock b/flowctl/Cargo.lock index 47acdfbf..2c710c0d 100644 --- a/flowctl/Cargo.lock +++ b/flowctl/Cargo.lock @@ -26,9 +26,8 @@ dependencies = [ "cfg-if", "getrandom 0.3.4", "once_cell", - "serde", "version_check", - "zerocopy 0.8.48", + "zerocopy", ] [[package]] @@ -40,12 +39,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "allocator-api2" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -55,12 +48,6 @@ dependencies = [ "libc", ] -[[package]] -name = "anes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" - [[package]] name = "annotate-snippets" version = "0.12.5" @@ -157,23 +144,6 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" -[[package]] -name = "async-trait" -version = "0.1.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - [[package]] name = "autocfg" version = "1.5.0" @@ -215,41 +185,12 @@ dependencies = [ "backtrace", ] -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - [[package]] name = "base64" 
version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" -[[package]] -name = "bindgen" -version = "0.66.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" -dependencies = [ - "bitflags", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash 1.1.0", - "shlex", - "syn", - "which 4.4.2", -] - [[package]] name = "bitflags" version = "2.11.0" @@ -271,36 +212,6 @@ version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" -dependencies = [ - "serde", -] - -[[package]] -name = "cast" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" - -[[package]] -name = "castaway" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" -dependencies = [ - "rustversion", -] - [[package]] name = "cc" version = "1.2.59" @@ -311,27 +222,12 @@ dependencies = [ "shlex", ] -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - 
"nom", -] - [[package]] name = "cfg-if" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" -[[package]] -name = "cfg_aliases" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" - [[package]] name = "chrono" version = "0.4.44" @@ -346,44 +242,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "ciborium" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" -dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", -] - -[[package]] -name = "ciborium-io" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" - -[[package]] -name = "ciborium-ll" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" -dependencies = [ - "ciborium-io", - "half", -] - -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "clap" version = "4.6.0" @@ -433,49 +291,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" -[[package]] -name = "cmake" -version = "0.1.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678" -dependencies = [ - "cc", -] - [[package]] name = "colorchoice" version = "1.0.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" -[[package]] -name = "compact_str" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a" -dependencies = [ - "castaway", - "cfg-if", - "itoa", - "rustversion", - "ryu", - "serde", - "static_assertions", -] - -[[package]] -name = "console" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" -dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "unicode-width 0.2.0", - "windows-sys 0.59.0", -] - [[package]] name = "content_inspector" version = "0.2.4" @@ -500,53 +321,6 @@ dependencies = [ "libc", ] -[[package]] -name = "crc32fast" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "criterion" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" -dependencies = [ - "anes", - "cast", - "ciborium", - "clap", - "criterion-plot", - "futures", - "is-terminal", - "itertools 0.10.5", - "num-traits", - "once_cell", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_derive", - "serde_json", - "tinytemplate", - "tokio", - "walkdir", -] - -[[package]] -name = "criterion-plot" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" -dependencies = [ - "cast", - "itertools 0.10.5", -] - [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -572,12 +346,6 @@ version = "0.8.21" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" -[[package]] -name = "crunchy" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" - [[package]] name = "crypto-common" version = "0.1.7" @@ -588,81 +356,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "darling" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn", -] - -[[package]] -name = "darling_macro" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" -dependencies = [ - "darling_core", - "quote", - "syn", -] - -[[package]] -name = "dary_heap" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06d2e3287df1c007e74221c49ca10a95d557349e54b3a75dc2fb14712c751f04" -dependencies = [ - "serde", -] - -[[package]] -name = "derive_builder" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" -dependencies = [ - "darling", - "proc-macro2", - 
"quote", - "syn", -] - -[[package]] -name = "derive_builder_macro" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" -dependencies = [ - "derive_builder_core", - "syn", -] - [[package]] name = "digest" version = "0.10.7" @@ -673,38 +366,6 @@ dependencies = [ "crypto-common", ] -[[package]] -name = "dirs" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" -dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.61.2", -] - -[[package]] -name = "displaydoc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "dunce" version = "1.0.5" @@ -717,12 +378,6 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" -[[package]] -name = "encode_unicode" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" - [[package]] name = "encoding_rs" version = "0.8.35" @@ -757,28 +412,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "esaxx-rs" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d817e038c30374a4bcb22f94d0a8a0e216958d4c3dcde369b1439fec4bdda6e6" - -[[package]] -name = "fastembed" -version = "5.13.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3688aa7e02113db24e0f83aba1edee912f36f515b52cffc9b3c550bbfc3eab87" -dependencies = [ - "anyhow", - "hf-hub", - "ndarray", - "ort", - "safetensors", - "serde", - "serde_json", - "tokenizers", -] - [[package]] name = "fastrand" version = "2.3.0" @@ -808,16 +441,6 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" -[[package]] -name = "flate2" -version = "1.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - [[package]] name = "flowctl-cli" version = "0.1.31" @@ -829,17 +452,15 @@ dependencies = [ "flowctl-core", "flowctl-db", "flowctl-service", - "libsql", "miette", "regex", "serde", "serde_json", "sha2", "tempfile", - "tokio", "tracing", "trycmd", - "which 8.0.2", + "which", ] [[package]] @@ -853,7 +474,7 @@ dependencies = [ "serde-saphyr", "serde_json", "tempfile", - "thiserror 2.0.18", + "thiserror", "toml", ] @@ -862,42 +483,26 @@ name = "flowctl-db" version = "0.1.0" dependencies = [ "chrono", - "criterion", - "fastembed", "flowctl-core", - "libsql", - "nix", "serde", "serde_json", "tempfile", - "thiserror 2.0.18", - "tokio", - "tracing", ] [[package]] name = "flowctl-service" version = "0.1.0" dependencies = [ - "async-trait", "chrono", "flowctl-core", "flowctl-db", - "libsql", "serde", "serde_json", "tempfile", - "thiserror 2.0.18", - "tokio", + "thiserror", "tracing", ] -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - [[package]] name = "foldhash" version = "0.1.5" @@ -905,143 +510,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] -name = "foldhash" -version = "0.2.0" +name = "generic-array" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] [[package]] -name = "form_urlencoded" -version = "1.2.2" +name = "getrandom" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" - -[[package]] -name = "futures-executor" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.32" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" - -[[package]] -name = "futures-macro" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" - -[[package]] -name = "futures-task" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" - -[[package]] -name = "futures-util" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi", - "wasm-bindgen", -] - -[[package]] -name = "getrandom" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "r-efi 5.3.0", - 
"wasip2", - "wasm-bindgen", + "cfg-if", + "js-sys", + "libc", + "r-efi 5.3.0", + "wasip2", + "wasm-bindgen", ] [[package]] @@ -1069,24 +558,13 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" -[[package]] -name = "half" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" -dependencies = [ - "cfg-if", - "crunchy", - "zerocopy 0.8.48", -] - [[package]] name = "hashbrown" version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ - "foldhash 0.1.5", + "foldhash", ] [[package]] @@ -1094,13 +572,6 @@ name = "hashbrown" version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" -dependencies = [ - "allocator-api2", - "equivalent", - "foldhash 0.2.0", - "serde", - "serde_core", -] [[package]] name = "heck" @@ -1108,86 +579,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" - -[[package]] -name = "hf-hub" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629d8f3bbeda9d148036d6b0de0a3ab947abd08ce90626327fc3547a49d59d97" -dependencies = [ - "dirs", - "http", - "indicatif", - "libc", - "log", - "rand", - "reqwest", - "serde", - "serde_json", - "thiserror 2.0.18", - "ureq 2.12.1", - "windows-sys 0.60.2", -] - -[[package]] -name = "hmac-sha256" -version = "1.1.14" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec9d92d097f4749b64e8cc33d924d9f40a2d4eb91402b458014b781f5733d60f" - -[[package]] -name = "home" -version = "0.5.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" -dependencies = [ - "windows-sys 0.61.2", -] - -[[package]] -name = "http" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" -dependencies = [ - "bytes", - "itoa", -] - -[[package]] -name = "http-body" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" -dependencies = [ - "bytes", - "http", -] - -[[package]] -name = "http-body-util" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" -dependencies = [ - "bytes", - "futures-core", - "http", - "http-body", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" - [[package]] name = "humantime" version = "2.3.0" @@ -1204,66 +595,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hyper" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca" -dependencies = [ - "atomic-waker", - "bytes", - "futures-channel", - "futures-core", - "http", - "http-body", - "httparse", - "itoa", - "pin-project-lite", - "smallvec", - "tokio", - "want", -] - -[[package]] -name = "hyper-rustls" -version = "0.27.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" -dependencies = [ - "http", - "hyper", - "hyper-util", - "rustls", - "rustls-pki-types", - "tokio", - "tokio-rustls", - "tower-service", - "webpki-roots 1.0.6", -] - -[[package]] -name = "hyper-util" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" -dependencies = [ - "base64 0.22.1", - "bytes", - "futures-channel", - "futures-util", - "http", - "http-body", - "hyper", - "ipnet", - "libc", - "percent-encoding", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", -] - [[package]] name = "iana-time-zone" version = "0.1.65" @@ -1288,121 +619,12 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" -dependencies = [ - "displaydoc", - "potential_utf", - "utf8_iter", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locale_core" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_normalizer" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" -dependencies = [ - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" - -[[package]] -name = "icu_properties" -version = "2.2.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" -dependencies = [ - "icu_collections", - "icu_locale_core", - "icu_properties_data", - "icu_provider", - "zerotrie", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" - -[[package]] -name = "icu_provider" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" -dependencies = [ - "displaydoc", - "icu_locale_core", - "writeable", - "yoke", - "zerofrom", - "zerotrie", - "zerovec", -] - [[package]] name = "id-arena" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - -[[package]] -name = "idna" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" -dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" -dependencies = [ - "icu_normalizer", - "icu_properties", -] - [[package]] name = "indexmap" version = "2.13.1" @@ -1415,46 +637,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "indicatif" -version = "0.17.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" -dependencies = [ - "console", - "number_prefix", - "portable-atomic", - "unicode-width 0.2.0", - "web-time", -] - -[[package]] -name = "ipnet" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" - -[[package]] -name = "iri-string" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20" -dependencies = [ - "memchr", - "serde", -] - -[[package]] -name = "is-terminal" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.61.2", -] - [[package]] name = "is_ci" version = "1.2.0" @@ -1462,204 +644,62 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" [[package]] -name = "is_terminal_polyfill" -version = "1.70.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" - -[[package]] -name = 
"js-sys" -version = "0.3.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9" -dependencies = [ - "cfg-if", - "futures-util", - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "leb128fmt" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" - -[[package]] -name = "libc" -version = "0.2.184" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af" - -[[package]] -name = "libloading" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" -dependencies = [ - "cfg-if", - "windows-link", -] - -[[package]] -name = "libredox" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08" -dependencies = [ - "bitflags", - "libc", - "plain", - "redox_syscall 0.7.3", -] - -[[package]] -name = "libsql" -version = "0.9.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30fe980ac5693ed1f3db490559fb578885e913a018df64af8a1a46e1959a78df" -dependencies = [ - "async-trait", - "bitflags", - "bytes", - "futures", - "libsql-sys", - "parking_lot", - "thiserror 1.0.69", - "tracing", -] - -[[package]] -name = "libsql-ffi" -version = "0.9.30" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0be1da6f123ceb2cd23f469883415cab9ee963286a85d61e22afb8b12e15e681" -dependencies = [ - "bindgen", - "cc", - "cmake", - "glob", -] - -[[package]] -name = "libsql-sys" -version = "0.9.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90725458cc4461bc82f8f7983e80b002ea4f64b5184e1462f252d0dd74b122f5" -dependencies = [ - "bytes", - "libsql-ffi", - "once_cell", - "tracing", - "zerocopy 0.7.35", -] - -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - -[[package]] -name = "linux-raw-sys" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" - -[[package]] -name = "litemap" -version = "0.8.2" +name = "is_terminal_polyfill" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] -name = "lock_api" -version = "0.4.14" +name = "itoa" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" -dependencies = [ - "scopeguard", -] +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" [[package]] -name = "log" -version = "0.4.29" +name = "js-sys" +version = "0.3.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9" +dependencies = [ + "once_cell", + "wasm-bindgen", +] [[package]] -name = "lru-slab" -version = "0.1.2" +name = 
"leb128fmt" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] -name = "lzma-rust2" -version = "0.15.7" +name = "libc" +version = "0.2.184" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1670343e58806300d87950e3401e820b519b9384281bbabfb15e3636689ffd69" +checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af" [[package]] -name = "macro_rules_attribute" -version = "0.2.2" +name = "libredox" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65049d7923698040cd0b1ddcced9b0eb14dd22c5f86ae59c3740eab64a676520" +checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08" dependencies = [ - "macro_rules_attribute-proc_macro", - "paste", + "bitflags", + "libc", + "plain", + "redox_syscall", ] [[package]] -name = "macro_rules_attribute-proc_macro" -version = "0.2.2" +name = "linux-raw-sys" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670fdfda89751bc4a84ac13eaa63e205cf0fd22b4c9a5fbfa085b63c1f1d3a30" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] -name = "matrixmultiply" -version = "0.3.10" +name = "log" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06de3016e9fae57a36fd14dba131fccf49f74b40b7fbdb472f96e361ec71a08" -dependencies = [ - "autocfg", - "rawpointer", -] +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "memchr" @@ -1697,12 +737,6 @@ dependencies = [ "syn", ] -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - 
[[package]] name = "miniz_oxide" version = "0.8.9" @@ -1710,67 +744,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", - "simd-adler32", -] - -[[package]] -name = "mio" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" -dependencies = [ - "libc", - "wasi", - "windows-sys 0.61.2", -] - -[[package]] -name = "monostate" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3341a273f6c9d5bef1908f17b7267bbab0e95c9bf69a0d4dcf8e9e1b2c76ef67" -dependencies = [ - "monostate-impl", - "serde", - "serde_core", -] - -[[package]] -name = "monostate-impl" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4db6d5580af57bf992f59068d4ea26fd518574ff48d7639b255a36f9de6e7e9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "ndarray" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520080814a7a6b4a6e9070823bb24b4531daac8c4627e08ba5de8c5ef2f2752d" -dependencies = [ - "matrixmultiply", - "num-complex", - "num-integer", - "num-traits", - "portable-atomic", - "portable-atomic-util", - "rawpointer", -] - -[[package]] -name = "nix" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" -dependencies = [ - "bitflags", - "cfg-if", - "cfg_aliases", - "libc", ] [[package]] @@ -1779,40 +752,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - [[package]] name = "normalize-line-endings" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" -dependencies = [ - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -1822,12 +767,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - [[package]] name = "object" version = "0.37.3" @@ -1849,64 +788,6 @@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" -[[package]] -name = "onig" -version = "6.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "336b9c63443aceef14bea841b899035ae3abe89b7c486aaf4c5bd8aafedac3f0" -dependencies = [ - "bitflags", - "libc", - "once_cell", - "onig_sys", -] - -[[package]] -name = "onig_sys" -version = "69.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f86c6eef3d6df15f23bcfb6af487cbd2fed4e5581d58d5bf1f5f8b7f6727dc" -dependencies = [ - "cc", - "pkg-config", -] - -[[package]] -name = "oorandom" -version = "11.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" - -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - -[[package]] -name = "ort" -version = "2.0.0-rc.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5df903c0d2c07b56950f1058104ab0c8557159f2741782223704de9be73c3c" -dependencies = [ - "ndarray", - "ort-sys", - "smallvec", - "tracing", - "ureq 3.3.0", -] - -[[package]] -name = "ort-sys" -version = "2.0.0-rc.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06503bb33f294c5f1ba484011e053bfa6ae227074bdb841e9863492dc5960d4b" -dependencies = [ - "hmac-sha256", - "lzma-rust2", - "ureq 3.3.0", -] - [[package]] name = "os_pipe" version = "1.2.3" @@ -1923,47 +804,6 @@ version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d211803b9b6b570f68772237e415a029d5a50c65d382910b879fb19d3271f94d" -[[package]] -name = "parking_lot" -version = "0.12.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall 0.5.18", - "smallvec", - "windows-link", -] - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - -[[package]] -name = "percent-encoding" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" - [[package]] name = "petgraph" version = "0.7.1" @@ -1980,79 +820,12 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" -[[package]] -name = "pkg-config" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" - [[package]] name = "plain" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" -[[package]] -name = "plotters" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" -dependencies = [ - "num-traits", - "plotters-backend", - "plotters-svg", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "plotters-backend" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" - -[[package]] -name = "plotters-svg" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" -dependencies = [ - "plotters-backend", -] - -[[package]] -name = "portable-atomic" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" - -[[package]] -name = "portable-atomic-util" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "091397be61a01d4be58e7841595bd4bfedb15f1cd54977d79b8271e94ed799a3" -dependencies = [ - "portable-atomic", -] - -[[package]] -name = "potential_utf" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" -dependencies = [ - "zerovec", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy 0.8.48", -] - [[package]] name = "prettyplease" version = "0.2.37" @@ -2072,61 +845,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "quinn" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" -dependencies = [ - "bytes", - "cfg_aliases", - "pin-project-lite", - "quinn-proto", - "quinn-udp", - "rustc-hash 2.1.2", - "rustls", - "socket2", - "thiserror 2.0.18", - "tokio", - "tracing", - "web-time", -] - -[[package]] -name = "quinn-proto" -version = "0.11.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" -dependencies = [ - "bytes", - "getrandom 0.3.4", - "lru-slab", - "rand", - "ring", - "rustc-hash 2.1.2", - "rustls", - "rustls-pki-types", - "slab", - "thiserror 2.0.18", - "tinyvec", - "tracing", - "web-time", -] - -[[package]] -name = "quinn-udp" -version = "0.5.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" -dependencies = [ - "cfg_aliases", - "libc", - "once_cell", - "socket2", - "tracing", - "windows-sys 0.60.2", -] - [[package]] name = "quote" version = "1.0.45" @@ -2149,59 +867,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" [[package]] -name = "rand" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" -dependencies = [ - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" -dependencies = [ - "getrandom 0.3.4", -] - -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - -[[package]] -name = "rayon" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-cond" -version = "0.4.0" +name = "rayon" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2964d0cf57a3e7a06e8183d14a8b527195c706b7983549cd5462d5aa3747438f" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", - "itertools 0.14.0", - "rayon", + "rayon-core", ] [[package]] @@ -2214,15 +886,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "redox_syscall" -version = "0.5.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" -dependencies = [ - "bitflags", -] - [[package]] name = "redox_syscall" version = 
"0.7.3" @@ -2232,17 +895,6 @@ dependencies = [ "bitflags", ] -[[package]] -name = "redox_users" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" -dependencies = [ - "getrandom 0.2.17", - "libredox", - "thiserror 2.0.18", -] - [[package]] name = "regex" version = "1.12.3" @@ -2272,92 +924,12 @@ version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" -[[package]] -name = "reqwest" -version = "0.12.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" -dependencies = [ - "base64 0.22.1", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-body-util", - "hyper", - "hyper-rustls", - "hyper-util", - "js-sys", - "log", - "percent-encoding", - "pin-project-lite", - "quinn", - "rustls", - "rustls-pki-types", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper", - "tokio", - "tokio-rustls", - "tokio-util", - "tower", - "tower-http", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-streams", - "web-sys", - "webpki-roots 1.0.6", -] - -[[package]] -name = "ring" -version = "0.17.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" -dependencies = [ - "cc", - "cfg-if", - "getrandom 0.2.17", - "libc", - "untrusted", - "windows-sys 0.52.0", -] - [[package]] name = "rustc-demangle" version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hash" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe" - -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - [[package]] name = "rustix" version = "1.1.4" @@ -2367,69 +939,16 @@ dependencies = [ "bitflags", "errno", "libc", - "linux-raw-sys 0.12.1", + "linux-raw-sys", "windows-sys 0.61.2", ] -[[package]] -name = "rustls" -version = "0.23.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" -dependencies = [ - "log", - "once_cell", - "ring", - "rustls-pki-types", - "rustls-webpki", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls-pki-types" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" -dependencies = [ - "web-time", - "zeroize", -] - -[[package]] -name = "rustls-webpki" -version = "0.103.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" -dependencies = [ - "ring", - "rustls-pki-types", - "untrusted", -] - [[package]] name = "rustversion" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" -[[package]] -name = "ryu" -version = "1.0.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" - 
-[[package]] -name = "safetensors" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "675656c1eabb620b921efea4f9199f97fc86e36dd6ffd1fbbe48d0f59a4987f5" -dependencies = [ - "hashbrown 0.16.1", - "serde", - "serde_json", -] - [[package]] name = "same-file" version = "1.0.6" @@ -2447,15 +966,9 @@ checksum = "67dec0c833db75dc98957956b303fe447ffc5eb13f2325ef4c2350f7f3aa69e3" dependencies = [ "arraydeque", "smallvec", - "thiserror 2.0.18", + "thiserror", ] -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - [[package]] name = "semver" version = "1.0.28" @@ -2480,7 +993,7 @@ checksum = "09fbdfe7a27a1b1633dfc0c4c8e65940b8d819c5ddb9cca48ebc3223b00c8b14" dependencies = [ "ahash", "annotate-snippets", - "base64 0.22.1", + "base64", "encoding_rs_io", "getrandom 0.3.4", "nohash-hasher", @@ -2543,18 +1056,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - [[package]] name = "sha2" version = "0.10.9" @@ -2572,34 +1073,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "signal-hook-registry" -version = "1.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" -dependencies = [ - "errno", - "libc", -] - -[[package]] -name = "simd-adler32" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214" - 
[[package]] name = "similar" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" -[[package]] -name = "slab" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" - [[package]] name = "smallvec" version = "1.15.1" @@ -2637,63 +1116,12 @@ dependencies = [ "anstream 0.6.21", ] -[[package]] -name = "socket2" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" -dependencies = [ - "libc", - "windows-sys 0.61.2", -] - -[[package]] -name = "socks" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3dbbd9ae980613c6dd8e28a9407b50509d3803b57624d5dfe8315218cd58b" -dependencies = [ - "byteorder", - "libc", - "winapi", -] - -[[package]] -name = "spm_precompiled" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5851699c4033c63636f7ea4cf7b7c1f1bf06d0cc03cfb42e711de5a5c46cf326" -dependencies = [ - "base64 0.13.1", - "nom", - "serde", - "unicode-segmentation", -] - -[[package]] -name = "stable_deref_trait" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "subtle" -version = "2.6.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - [[package]] name = "supports-color" version = "3.0.2" @@ -2726,26 +1154,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "sync_wrapper" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" -dependencies = [ - "futures-core", -] - -[[package]] -name = "synstructure" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tempfile" version = "3.27.0" @@ -2755,7 +1163,7 @@ dependencies = [ "fastrand", "getrandom 0.4.2", "once_cell", - "rustix 1.1.4", + "rustix", "windows-sys 0.61.2", ] @@ -2765,7 +1173,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "230a1b821ccbd75b185820a1f1ff7b14d21da1e442e22c0863ea5f08771a8874" dependencies = [ - "rustix 1.1.4", + "rustix", "windows-sys 0.61.2", ] @@ -2779,33 +1187,13 @@ dependencies = [ "unicode-width 0.2.0", ] -[[package]] -name = "thiserror" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl 1.0.69", -] - [[package]] name = "thiserror" version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.18", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" -dependencies = [ - "proc-macro2", - "quote", - 
"syn", + "thiserror-impl", ] [[package]] @@ -2819,125 +1207,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tinystr" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" -dependencies = [ - "displaydoc", - "zerovec", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "tinyvec" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokenizers" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b238e22d44a15349529690fb07bd645cf58149a1b1e44d6cb5bd1641ff1a6223" -dependencies = [ - "ahash", - "aho-corasick", - "compact_str", - "dary_heap", - "derive_builder", - "esaxx-rs", - "getrandom 0.3.4", - "itertools 0.14.0", - "log", - "macro_rules_attribute", - "monostate", - "onig", - "paste", - "rand", - "rayon", - "rayon-cond", - "regex", - "regex-syntax", - "serde", - "serde_json", - "spm_precompiled", - "thiserror 2.0.18", - "unicode-normalization-alignments", - "unicode-segmentation", - "unicode_categories", -] - -[[package]] -name = "tokio" -version = "1.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd1c4c0fc4a7ab90fc15ef6daaa3ec3b893f004f915f2392557ed23237820cd" -dependencies = [ - "bytes", - "libc", - "mio", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - 
"socket2", - "tokio-macros", - "windows-sys 0.61.2", -] - -[[package]] -name = "tokio-macros" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-rustls" -version = "0.26.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" -dependencies = [ - "rustls", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - [[package]] name = "toml" version = "0.8.23" @@ -2994,74 +1263,29 @@ dependencies = [ "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", - "winnow 0.7.15", -] - -[[package]] -name = "toml_parser" -version = "1.1.2+spec-1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526" -dependencies = [ - "winnow 1.0.1", -] - -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - -[[package]] -name = "toml_writer" -version = "1.1.1+spec-1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db" - -[[package]] -name = "tower" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" -dependencies = [ - "futures-core", - "futures-util", - "pin-project-lite", - "sync_wrapper", - 
"tokio", - "tower-layer", - "tower-service", + "winnow 0.7.15", ] [[package]] -name = "tower-http" -version = "0.6.8" +name = "toml_parser" +version = "1.1.2+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526" dependencies = [ - "bitflags", - "bytes", - "futures-util", - "http", - "http-body", - "iri-string", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", + "winnow 1.0.1", ] [[package]] -name = "tower-layer" -version = "0.3.3" +name = "toml_write" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] -name = "tower-service" -version = "0.3.3" +name = "toml_writer" +version = "1.1.1+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db" [[package]] name = "tracing" @@ -3094,12 +1318,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "try-lock" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - [[package]] name = "trycmd" version = "0.15.11" @@ -3136,21 +1354,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" -[[package]] -name = "unicode-normalization-alignments" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f613e4fa046e69818dd287fdc4bc78175ff20331479dab6e1b0f98d57062de" -dependencies = [ - "smallvec", -] 
- -[[package]] -name = "unicode-segmentation" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" - [[package]] name = "unicode-width" version = "0.1.14" @@ -3169,90 +1372,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" -[[package]] -name = "unicode_categories" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" - -[[package]] -name = "untrusted" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" - -[[package]] -name = "ureq" -version = "2.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" -dependencies = [ - "base64 0.22.1", - "flate2", - "log", - "once_cell", - "rustls", - "rustls-pki-types", - "serde", - "serde_json", - "socks", - "url", - "webpki-roots 0.26.11", -] - -[[package]] -name = "ureq" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea7109cdcd5864d4eeb1b58a1648dc9bf520360d7af16ec26d0a9354bafcfc0" -dependencies = [ - "base64 0.22.1", - "log", - "percent-encoding", - "rustls", - "rustls-pki-types", - "socks", - "ureq-proto", - "utf8-zero", - "webpki-roots 1.0.6", -] - -[[package]] -name = "ureq-proto" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e994ba84b0bd1b1b0cf92878b7ef898a5c1760108fe7b6010327e274917a808c" -dependencies = [ - "base64 0.22.1", - "http", - "httparse", - "log", -] - -[[package]] -name = "url" -version = "2.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", - "serde", -] - -[[package]] -name = "utf8-zero" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8c0a043c9540bae7c578c88f91dda8bd82e59ae27c21baca69c8b191aaf5a6e" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "utf8parse" version = "0.2.2" @@ -3284,21 +1403,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - [[package]] name = "wasip2" version = "1.0.2+wasi-0.2.9" @@ -3330,16 +1434,6 @@ dependencies = [ "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.67" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03623de6905b7206edd0a75f69f747f134b7f0a2323392d664448bf2d3c5d87e" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - [[package]] name = "wasm-bindgen-macro" version = "0.2.117" @@ -3394,19 +1488,6 @@ dependencies = [ "wasmparser", ] -[[package]] -name = "wasm-streams" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" -dependencies = [ - "futures-util", - "js-sys", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "wasmparser" version = "0.244.0" @@ -3419,56 +1500,6 @@ 
dependencies = [ "semver", ] -[[package]] -name = "web-sys" -version = "0.3.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd70027e39b12f0849461e08ffc50b9cd7688d942c1c8e3c7b22273236b4dd0a" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "web-time" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki-roots" -version = "0.26.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" -dependencies = [ - "webpki-roots 1.0.6", -] - -[[package]] -name = "webpki-roots" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.44", -] - [[package]] name = "which" version = "8.0.2" @@ -3478,22 +1509,6 @@ dependencies = [ "libc", ] -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - [[package]] name = "winapi-util" version = "0.1.11" @@ -3503,12 +1518,6 @@ dependencies = [ "windows-sys 
0.61.2", ] -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - [[package]] name = "windows-core" version = "0.62.2" @@ -3568,31 +1577,13 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-sys" version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.53.5", + "windows-targets", ] [[package]] @@ -3604,22 +1595,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - [[package]] name = "windows-targets" version = "0.53.5" @@ -3627,106 +1602,58 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ "windows-link", - "windows_aarch64_gnullvm 0.53.1", - "windows_aarch64_msvc 0.53.1", - 
"windows_i686_gnu 0.53.1", - "windows_i686_gnullvm 0.53.1", - "windows_i686_msvc 0.53.1", - "windows_x86_64_gnu 0.53.1", - "windows_x86_64_gnullvm 0.53.1", - "windows_x86_64_msvc 0.53.1", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - [[package]] name = "windows_aarch64_gnullvm" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - [[package]] name = "windows_aarch64_msvc" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - [[package]] name = "windows_i686_gnu" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - [[package]] name = "windows_i686_gnullvm" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - [[package]] name = "windows_i686_msvc" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - [[package]] name = "windows_x86_64_gnu" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - [[package]] name = "windows_x86_64_gnullvm" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - [[package]] name = "windows_x86_64_msvc" version = "0.53.1" @@ -3836,63 +1763,13 @@ dependencies = [ "wasmparser", ] -[[package]] -name = "writeable" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" - -[[package]] -name = "yoke" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" -dependencies = [ - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive 0.7.35", -] - [[package]] name = "zerocopy" version = "0.8.48" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9" dependencies = [ - "zerocopy-derive 0.8.48", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "zerocopy-derive", ] [[package]] @@ -3906,66 +1783,6 @@ dependencies = [ "syn", ] -[[package]] -name = "zerofrom" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "zeroize" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" - 
-[[package]] -name = "zerotrie" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", -] - -[[package]] -name = "zerovec" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "zmij" version = "1.0.21" diff --git a/flowctl/Cargo.toml b/flowctl/Cargo.toml index 172d5b5b..751f8b40 100644 --- a/flowctl/Cargo.toml +++ b/flowctl/Cargo.toml @@ -40,25 +40,11 @@ tracing = "0.1" # DAG petgraph = "0.7" -# libSQL (async, native vectors) — the DB engine -libsql = { version = "0.9", default-features = false, features = ["core"] } - -# AI embeddings (memory semantic search) -# Use rustls to avoid OpenSSL cross-compile headaches on release builds. 
-fastembed = { version = "5.12", default-features = false, features = [ - "ort-download-binaries-rustls-tls", - "hf-hub-rustls-tls", -] } - # CLI (cli crate) clap = { version = "4", features = ["derive"] } clap_complete = "4" miette = { version = "7", features = ["fancy"] } -# Async runtime (service layer) -tokio = { version = "1", features = ["full"] } -tokio-util = { version = "0.7", features = ["rt"] } - # Unix process management nix = { version = "0.30", features = ["signal", "process"] } diff --git a/flowctl/crates/flowctl-cli/Cargo.toml b/flowctl/crates/flowctl-cli/Cargo.toml index ae989ec6..f5f24607 100644 --- a/flowctl/crates/flowctl-cli/Cargo.toml +++ b/flowctl/crates/flowctl-cli/Cargo.toml @@ -14,7 +14,6 @@ path = "src/main.rs" flowctl-core = { workspace = true } flowctl-db = { workspace = true } flowctl-service = { workspace = true } -libsql = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } anyhow = { workspace = true } @@ -23,7 +22,6 @@ clap = { workspace = true } clap_complete = { workspace = true } miette = { workspace = true } tracing = { workspace = true } -tokio = { workspace = true } regex = { workspace = true } sha2 = { workspace = true } which = { workspace = true } diff --git a/flowctl/crates/flowctl-cli/src/commands/admin/config.rs b/flowctl/crates/flowctl-cli/src/commands/admin/config.rs index 31ebb1a4..49482d7f 100644 --- a/flowctl/crates/flowctl-cli/src/commands/admin/config.rs +++ b/flowctl/crates/flowctl-cli/src/commands/admin/config.rs @@ -1,6 +1,5 @@ //! Config and state-path commands. 
-use std::env; use std::fs; use clap::Subcommand; @@ -15,13 +14,11 @@ use super::{deep_merge, get_default_config, get_flow_dir, write_json_file}; // ── State-path command ───────────────────────────────────────────── pub fn cmd_state_path(json_mode: bool, task: Option) { - let cwd = env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from(".")); - let state_dir = match crate::commands::db_shim::resolve_state_dir(&cwd) { - Ok(d) => d, - Err(e) => { - error_exit(&format!("Could not resolve state dir: {}", e)); - } - }; + let flow_dir = get_flow_dir(); + let state_dir = flow_dir.join(".state"); + if !state_dir.exists() { + let _ = std::fs::create_dir_all(&state_dir); + } if let Some(task_id) = task { if !flowctl_core::id::is_task_id(&task_id) { diff --git a/flowctl/crates/flowctl-cli/src/commands/admin/exchange.rs b/flowctl/crates/flowctl-cli/src/commands/admin/exchange.rs index 1006551a..61f8a417 100644 --- a/flowctl/crates/flowctl-cli/src/commands/admin/exchange.rs +++ b/flowctl/crates/flowctl-cli/src/commands/admin/exchange.rs @@ -1,71 +1,34 @@ //! Export and import commands. - -use std::env; -use std::fs; +//! +//! With file-based storage, export is a no-op (data is already in files) +//! and import scans files to rebuild any derived state. use serde_json::json; use crate::output::{error_exit, json_output}; -use flowctl_core::types::{EPICS_DIR, TASKS_DIR}; - -pub fn cmd_export(json: bool, epic_filter: Option, _format: String) { +pub fn cmd_export(json: bool, _epic_filter: Option, _format: String) { let flow_dir = super::get_flow_dir(); if !flow_dir.exists() { error_exit(".flow/ does not exist. 
Run 'flowctl init' first."); } - let cwd = env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from(".")); - let conn = crate::commands::db_shim::open(&cwd) - .unwrap_or_else(|e| error_exit(&format!("Failed to open DB: {e}"))); - - let epic_repo = crate::commands::db_shim::EpicRepo::new(&conn); - let task_repo = crate::commands::db_shim::TaskRepo::new(&conn); - let epics_dir = flow_dir.join(EPICS_DIR); - let _ = fs::create_dir_all(&epics_dir); - let epics = match &epic_filter { - Some(id) => match epic_repo.get(id) { - Ok(e) => vec![e], - Err(_) => { error_exit(&format!("Epic {} not found", id)); } - }, - None => epic_repo.list(None).unwrap_or_default(), - }; - - let mut epics_exported = 0; + let epics = flowctl_core::json_store::epic_list(&flow_dir).unwrap_or_default(); + let mut tasks_count = 0; for epic in &epics { - let (_, body) = epic_repo.get_with_body(&epic.id).unwrap_or((epic.clone(), String::new())); - let doc = flowctl_core::frontmatter::Document { frontmatter: epic.clone(), body }; - if let Ok(content) = flowctl_core::frontmatter::write(&doc) { - let path = epics_dir.join(format!("{}.md", epic.id)); - let _ = fs::write(&path, content); - epics_exported += 1; - } - } - - let tasks_dir = flow_dir.join(TASKS_DIR); - let _ = fs::create_dir_all(&tasks_dir); - let mut tasks_exported = 0; - for epic in &epics { - let tasks = task_repo.list_by_epic(&epic.id).unwrap_or_default(); - for task in &tasks { - let (_, body) = task_repo.get_with_body(&task.id).unwrap_or((task.clone(), String::new())); - let doc = flowctl_core::frontmatter::Document { frontmatter: task.clone(), body }; - if let Ok(content) = flowctl_core::frontmatter::write(&doc) { - let path = tasks_dir.join(format!("{}.md", task.id)); - let _ = fs::write(&path, content); - tasks_exported += 1; - } - } + let tasks = flowctl_core::json_store::task_list_by_epic(&flow_dir, &epic.id).unwrap_or_default(); + tasks_count += tasks.len(); } if json { json_output(json!({ "success": true, - 
"epics_exported": epics_exported, - "tasks_exported": tasks_exported, + "epics_exported": epics.len(), + "tasks_exported": tasks_count, + "message": "Data is already in JSON files (file-based storage)", })); } else { - println!("Exported {} epics, {} tasks to .flow/", epics_exported, tasks_exported); + println!("Data is already in JSON files: {} epics, {} tasks in .flow/", epics.len(), tasks_count); } } @@ -74,29 +37,26 @@ pub fn cmd_import(json: bool) { if !flow_dir.exists() { error_exit(".flow/ does not exist. Run 'flowctl init' first."); } - let cwd = env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from(".")); - let conn = crate::commands::db_shim::open(&cwd) - .unwrap_or_else(|e| error_exit(&format!("Failed to open DB: {e}"))); - let state_dir = crate::commands::db_shim::resolve_state_dir(&cwd).ok(); - let result = crate::commands::db_shim::reindex(&conn, &flow_dir, state_dir.as_deref()) - .unwrap_or_else(|e| error_exit(&format!("Import failed: {e}"))); + let epics = flowctl_core::json_store::epic_list(&flow_dir).unwrap_or_default(); + let mut tasks_count = 0; + for epic in &epics { + let tasks = flowctl_core::json_store::task_list_by_epic(&flow_dir, &epic.id).unwrap_or_default(); + tasks_count += tasks.len(); + } if json { json_output(json!({ "success": true, - "epics_imported": result.epics_indexed, - "tasks_imported": result.tasks_indexed, - "files_skipped": result.files_skipped, - "warnings": result.warnings, + "epics_imported": epics.len(), + "tasks_imported": tasks_count, + "files_skipped": 0, + "warnings": [], })); } else { println!( - "Imported {} epics, {} tasks ({} skipped)", - result.epics_indexed, result.tasks_indexed, result.files_skipped + "Scanned {} epics, {} tasks from .flow/ (file-based storage, no DB to import into)", + epics.len(), tasks_count ); - for w in &result.warnings { - eprintln!(" warning: {w}"); - } } } diff --git a/flowctl/crates/flowctl-cli/src/commands/admin/init.rs 
b/flowctl/crates/flowctl-cli/src/commands/admin/init.rs index c0bee3e5..4c5465f1 100644 --- a/flowctl/crates/flowctl-cli/src/commands/admin/init.rs +++ b/flowctl/crates/flowctl-cli/src/commands/admin/init.rs @@ -74,40 +74,22 @@ pub fn cmd_init(json: bool) { } } - // Create/open flow.db (runs migrations automatically) - match crate::commands::db_shim::open(&cwd) { - Ok(conn) => { - actions.push("flow.db ready".to_string()); - - // Auto-import from existing MD files if epics exist - let epics_dir = flow_dir.join(EPICS_DIR); - if epics_dir.is_dir() { - let has_md_files = fs::read_dir(&epics_dir) - .map(|entries| entries.flatten().any(|e| { - e.file_name().to_string_lossy().ends_with(".md") - })) - .unwrap_or(false); - - if has_md_files { - match crate::commands::db_shim::reindex(&conn, &flow_dir, None) { - Ok(result) => { - actions.push(format!( - "auto-imported {} epics, {} tasks from MD", - result.epics_indexed, result.tasks_indexed - )); - } - Err(e) => { - eprintln!("warning: auto-import failed: {e}"); - } - } - } - } - } - Err(e) => { - eprintln!("warning: DB creation failed: {e}"); + // Ensure .state directory exists for runtime state + let state_dir = flow_dir.join(".state"); + if !state_dir.exists() { + if let Err(e) = fs::create_dir_all(&state_dir) { + eprintln!("warning: failed to create .state/: {e}"); + } else { + actions.push("created .state/".to_string()); } } + // Ensure FlowStore dirs are ready + let store = flowctl_db::FlowStore::new(flow_dir.clone()); + if let Err(e) = store.ensure_dirs() { + eprintln!("warning: failed to ensure store dirs: {e}"); + } + // Build output let message = if actions.is_empty() { ".flow/ already up to date".to_string() diff --git a/flowctl/crates/flowctl-cli/src/commands/admin/status.rs b/flowctl/crates/flowctl-cli/src/commands/admin/status.rs index 0a920a29..1d0c09c0 100644 --- a/flowctl/crates/flowctl-cli/src/commands/admin/status.rs +++ b/flowctl/crates/flowctl-cli/src/commands/admin/status.rs @@ -1,6 +1,6 @@ //! 
Status, doctor, and validate commands. -use std::env; + use std::fs; use std::path::Path; use std::process::Command; @@ -594,28 +594,22 @@ pub fn cmd_doctor(json_mode: bool, workflow: bool) { } // Check 2: State-dir accessibility - let cwd = env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from(".")); - match crate::commands::db_shim::resolve_state_dir(&cwd) { - Ok(state_dir) => { - if let Err(e) = fs::create_dir_all(&state_dir) { - checks.push(json!({"name": "state_dir_access", "status": "fail", "message": format!("State dir not accessible: {}", e)})); - } else { - // Test write access - let test_file = state_dir.join(".doctor-probe"); - match fs::write(&test_file, "probe") { - Ok(_) => { - let _ = fs::remove_file(&test_file); - checks.push(json!({"name": "state_dir_access", "status": "pass", "message": format!("State dir accessible: {}", state_dir.display())})); - } - Err(e) => { - checks.push(json!({"name": "state_dir_access", "status": "fail", "message": format!("State dir not writable: {}", e)})); - } + { + let state_dir = flow_dir.join(".state"); + if let Err(e) = fs::create_dir_all(&state_dir) { + checks.push(json!({"name": "state_dir_access", "status": "fail", "message": format!("State dir not accessible: {}", e)})); + } else { + let test_file = state_dir.join(".doctor-probe"); + match fs::write(&test_file, "probe") { + Ok(_) => { + let _ = fs::remove_file(&test_file); + checks.push(json!({"name": "state_dir_access", "status": "pass", "message": format!("State dir accessible: {}", state_dir.display())})); + } + Err(e) => { + checks.push(json!({"name": "state_dir_access", "status": "fail", "message": format!("State dir not writable: {}", e)})); } } } - Err(e) => { - checks.push(json!({"name": "state_dir_access", "status": "fail", "message": format!("Could not resolve state dir: {}", e)})); - } } // Check 3: Config validity @@ -730,23 +724,12 @@ pub fn cmd_doctor(json_mode: bool, workflow: bool) { } } - // Check 7: stale file locks (count via SQL) - 
let cwd = env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from(".")); - if let Ok(conn) = crate::commands::db_shim::open(&cwd) { - let lock_count = crate::commands::db_shim::block_on_pub(async { - let mut rows = conn.inner_conn() - .query("SELECT COUNT(*) FROM file_locks", ()) - .await - .map_err(flowctl_db::DbError::LibSql)?; - if let Some(row) = rows.next().await.map_err(flowctl_db::DbError::LibSql)? { - Ok::(row.get::(0).unwrap_or(0)) - } else { - Ok(0) - } - }); - match lock_count { - Ok(n) if n > 0 => { - checks.push(json!({"name": "stale_locks", "status": "warn", "message": format!("{} file lock(s) active — verify with 'flowctl lock-check'", n)})); + // Check 7: stale file locks + { + let store = flowctl_db::FlowStore::new(flow_dir.clone()); + match store.locks().list() { + Ok(locks) if !locks.is_empty() => { + checks.push(json!({"name": "stale_locks", "status": "warn", "message": format!("{} file lock(s) active — verify with 'flowctl lock-check'", locks.len())})); } Ok(_) => { checks.push(json!({"name": "stale_locks", "status": "pass", "message": "No active file locks"})); diff --git a/flowctl/crates/flowctl-cli/src/commands/approval.rs b/flowctl/crates/flowctl-cli/src/commands/approval.rs index 4ddb9d6b..fe5fec13 100644 --- a/flowctl/crates/flowctl-cli/src/commands/approval.rs +++ b/flowctl/crates/flowctl-cli/src/commands/approval.rs @@ -1,19 +1,18 @@ //! Approval commands: `flowctl approval create|list|show|approve|reject`. //! -//! All operations go directly through libSQL. +//! All operations use file-based storage via FlowStore. 
-use std::env; use std::time::{Duration, Instant}; use clap::Subcommand; use serde_json::Value; use flowctl_core::approvals::{ApprovalKind, ApprovalStatus, CreateApprovalRequest}; -use flowctl_service::approvals::{ApprovalStore, LibSqlApprovalStore}; +use flowctl_service::approvals::FileApprovalStore; use crate::output::{error_exit, json_output}; -use super::helpers::resolve_actor; +use super::helpers::{get_flow_dir, resolve_actor}; #[derive(Subcommand, Debug)] pub enum ApprovalCmd { @@ -62,43 +61,27 @@ pub enum ApprovalCmd { } pub fn dispatch(cmd: &ApprovalCmd, json: bool) { - // Every subcommand touches async DB/HTTP — run on a Tokio runtime. - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap_or_else(|e| error_exit(&format!("tokio runtime: {e}"))); - rt.block_on(async move { - match cmd { - ApprovalCmd::Create { task, kind, payload } => { - cmd_create(json, task, kind, payload).await - } - ApprovalCmd::List { pending } => cmd_list(json, *pending).await, - ApprovalCmd::Show { id, wait, timeout } => { - cmd_show(json, id, *wait, *timeout).await - } - ApprovalCmd::Approve { id } => cmd_approve(json, id).await, - ApprovalCmd::Reject { id, reason } => { - cmd_reject(json, id, reason.clone()).await - } + match cmd { + ApprovalCmd::Create { task, kind, payload } => { + cmd_create(json, task, kind, payload) } - }); + ApprovalCmd::List { pending } => cmd_list(json, *pending), + ApprovalCmd::Show { id, wait, timeout } => { + cmd_show(json, id, *wait, *timeout) + } + ApprovalCmd::Approve { id } => cmd_approve(json, id), + ApprovalCmd::Reject { id, reason } => { + cmd_reject(json, id, reason.clone()) + } + } } -// ── DB operations ─────────────────────────────────────────────────── - -async fn open_local_store() -> LibSqlApprovalStore { - let cwd = env::current_dir() - .unwrap_or_else(|e| error_exit(&format!("cwd: {e}"))); - let db = flowctl_db::open_async(&cwd) - .await - .unwrap_or_else(|e| error_exit(&format!("open db: {e}"))); - 
let conn = db - .connect() - .unwrap_or_else(|e| error_exit(&format!("connect db: {e}"))); - // Leak the Database so the connection stays valid for the rest of the - // process lifetime (CLI is short-lived). - Box::leak(Box::new(db)); - LibSqlApprovalStore::new(conn) +// ── Store access ─────────────────────────────────────────────────── + +fn open_local_store() -> FileApprovalStore { + let flow_dir = get_flow_dir(); + let store = flowctl_db::FlowStore::new(flow_dir); + FileApprovalStore::new(store) } // ── Payload parsing ───────────────────────────────────────────────── @@ -116,25 +99,24 @@ fn parse_payload(s: &str) -> Value { // ── Command impls ─────────────────────────────────────────────────── -async fn cmd_create(json: bool, task: &str, kind_str: &str, payload: &str) { +fn cmd_create(json: bool, task: &str, kind_str: &str, payload: &str) { let kind = ApprovalKind::parse(kind_str) .unwrap_or_else(|| error_exit(&format!("invalid --kind: {kind_str}"))); let payload_val = parse_payload(payload); - let store = open_local_store().await; + let store = open_local_store(); let created = store .create(CreateApprovalRequest { task_id: task.to_string(), kind, payload: payload_val, }) - .await .unwrap_or_else(|e| error_exit(&format!("create: {e}"))); emit_result(json, serde_json::to_value(&created).unwrap_or_default()); } -async fn cmd_list(json: bool, pending_only: bool) { - let store = open_local_store().await; +fn cmd_list(json: bool, pending_only: bool) { + let store = open_local_store(); let filter = if pending_only { Some(ApprovalStatus::Pending) } else { @@ -142,14 +124,13 @@ async fn cmd_list(json: bool, pending_only: bool) { }; let approvals = store .list(filter) - .await .unwrap_or_else(|e| error_exit(&format!("list: {e}"))); emit_list(json, serde_json::to_value(&approvals).unwrap_or_default()); } -async fn cmd_show(json: bool, id: &str, wait: bool, timeout_secs: u64) { +fn cmd_show(json: bool, id: &str, wait: bool, timeout_secs: u64) { if !wait { - let val = 
fetch_one(id).await; + let val = fetch_one(id); emit_result(json, val); return; } @@ -157,7 +138,7 @@ async fn cmd_show(json: bool, id: &str, wait: bool, timeout_secs: u64) { // Poll every 1s until status != pending OR timeout elapsed. let deadline = Instant::now() + Duration::from_secs(timeout_secs); loop { - let val = fetch_one(id).await; + let val = fetch_one(id); let status = val.get("status").and_then(|v| v.as_str()).unwrap_or(""); if status != "pending" { emit_result(json, val); @@ -168,35 +149,32 @@ async fn cmd_show(json: bool, id: &str, wait: bool, timeout_secs: u64) { "timeout waiting for approval {id} (status still pending after {timeout_secs}s)" )); } - tokio::time::sleep(Duration::from_secs(1)).await; + std::thread::sleep(Duration::from_secs(1)); } } -async fn fetch_one(id: &str) -> Value { - let store = open_local_store().await; +fn fetch_one(id: &str) -> Value { + let store = open_local_store(); let approval = store .get(id) - .await .unwrap_or_else(|e| error_exit(&format!("get: {e}"))); serde_json::to_value(&approval).unwrap_or_default() } -async fn cmd_approve(json: bool, id: &str) { +fn cmd_approve(json: bool, id: &str) { let resolver = resolve_actor(); - let store = open_local_store().await; + let store = open_local_store(); let resolved = store .approve(id, Some(resolver)) - .await .unwrap_or_else(|e| error_exit(&format!("approve: {e}"))); emit_result(json, serde_json::to_value(&resolved).unwrap_or_default()); } -async fn cmd_reject(json: bool, id: &str, reason: Option) { +fn cmd_reject(json: bool, id: &str, reason: Option) { let resolver = resolve_actor(); - let store = open_local_store().await; + let store = open_local_store(); let resolved = store .reject(id, Some(resolver), reason) - .await .unwrap_or_else(|e| error_exit(&format!("reject: {e}"))); emit_result(json, serde_json::to_value(&resolved).unwrap_or_default()); } diff --git a/flowctl/crates/flowctl-cli/src/commands/checkpoint.rs b/flowctl/crates/flowctl-cli/src/commands/checkpoint.rs 
index 1d5746d0..c8b41cc4 100644 --- a/flowctl/crates/flowctl-cli/src/commands/checkpoint.rs +++ b/flowctl/crates/flowctl-cli/src/commands/checkpoint.rs @@ -4,7 +4,6 @@ //! Each checkpoint is a copy of the flowctl.db file stored alongside it //! with an epic-specific suffix. -use std::env; use std::fs; use clap::Subcommand; @@ -50,16 +49,11 @@ pub fn dispatch(cmd: &CheckpointCmd, json: bool) { /// Resolve the checkpoint file path for a given epic. /// Checkpoints are stored in the state directory alongside the main database. fn checkpoint_path(epic_id: &str) -> Result { - let cwd = env::current_dir().map_err(|e| format!("Cannot get cwd: {}", e))?; - let state_dir = crate::commands::db_shim::resolve_state_dir(&cwd) - .map_err(|e| format!("Cannot resolve state dir: {}", e))?; - Ok(state_dir.join(format!("checkpoint-{}.db", epic_id))) -} - -/// Resolve the main database path. -fn db_path() -> Result { - let cwd = env::current_dir().map_err(|e| format!("Cannot get cwd: {}", e))?; - crate::commands::db_shim::resolve_db_path(&cwd).map_err(|e| format!("Cannot resolve db path: {}", e)) + let flow_dir = get_flow_dir(); + let state_dir = flow_dir.join(".state"); + std::fs::create_dir_all(&state_dir) + .map_err(|e| format!("Cannot create state dir: {}", e))?; + Ok(state_dir.join(format!("checkpoint-{}.json", epic_id))) } fn validate_prerequisites(epic_id: &str) { @@ -77,49 +71,47 @@ fn validate_prerequisites(epic_id: &str) { fn cmd_checkpoint_save(json_mode: bool, epic_id: &str) { validate_prerequisites(epic_id); - let src = match db_path() { - Ok(p) => p, - Err(e) => error_exit(&e), - }; + let flow_dir = get_flow_dir(); - if !src.exists() { - error_exit("No database found. 
Run 'flowctl init' and index first."); + // Snapshot: serialize current epic + tasks state to a checkpoint JSON + let epic = flowctl_core::json_store::epic_read(&flow_dir, epic_id).ok(); + let tasks = flowctl_core::json_store::task_list_by_epic(&flow_dir, epic_id).unwrap_or_default(); + + let mut task_states = Vec::new(); + for task in &tasks { + let state = flowctl_core::json_store::state_read(&flow_dir, &task.id).ok(); + task_states.push(json!({"task": task, "state": state})); } + let checkpoint = json!({ + "epic": epic, + "tasks": task_states, + "timestamp": chrono::Utc::now().to_rfc3339(), + }); + let dst = match checkpoint_path(epic_id) { Ok(p) => p, Err(e) => error_exit(&e), }; - // Ensure parent directory exists if let Some(parent) = dst.parent() { let _ = fs::create_dir_all(parent); } - // Copy the database file (SQLite WAL-safe: we copy the main file; - // for a fully safe checkpoint we'd use the backup API, but a file - // copy is sufficient for crash recovery purposes). - if let Err(e) = fs::copy(&src, &dst) { - error_exit(&format!( - "Failed to save checkpoint: {}", - e - )); + let content = serde_json::to_string_pretty(&checkpoint).unwrap_or_default(); + if let Err(e) = fs::write(&dst, &content) { + error_exit(&format!("Failed to save checkpoint: {}", e)); } - let size = fs::metadata(&dst).map(|m| m.len()).unwrap_or(0); - if json_mode { json_output(json!({ "epic": epic_id, "checkpoint": dst.to_string_lossy(), - "size_bytes": size, + "size_bytes": content.len(), "message": format!("Checkpoint saved for {}", epic_id), })); } else { - println!( - "Checkpoint saved for {} ({} bytes)", - epic_id, size - ); + println!("Checkpoint saved for {} ({} bytes)", epic_id, content.len()); } } @@ -138,21 +130,28 @@ fn cmd_checkpoint_restore(json_mode: bool, epic_id: &str) { )); } - let dst = match db_path() { - Ok(p) => p, - Err(e) => error_exit(&e), - }; + // Read checkpoint, restore task states + let content = fs::read_to_string(&src).unwrap_or_else(|e| { + 
error_exit(&format!("Failed to read checkpoint: {}", e)); + }); + let checkpoint: serde_json::Value = serde_json::from_str(&content).unwrap_or_else(|e| { + error_exit(&format!("Invalid checkpoint JSON: {}", e)); + }); - // Ensure parent directory exists - if let Some(parent) = dst.parent() { - let _ = fs::create_dir_all(parent); - } - - if let Err(e) = fs::copy(&src, &dst) { - error_exit(&format!( - "Failed to restore checkpoint: {}", - e - )); + let flow_dir = get_flow_dir(); + if let Some(tasks) = checkpoint.get("tasks").and_then(|t| t.as_array()) { + for entry in tasks { + if let (Some(task_id), Some(state)) = ( + entry.get("task").and_then(|t| t.get("id")).and_then(|i| i.as_str()), + entry.get("state"), + ) { + if !state.is_null() { + if let Ok(task_state) = serde_json::from_value::(state.clone()) { + let _ = flowctl_core::json_store::state_write(&flow_dir, task_id, &task_state); + } + } + } + } } if json_mode { diff --git a/flowctl/crates/flowctl-cli/src/commands/db_shim.rs b/flowctl/crates/flowctl-cli/src/commands/db_shim.rs deleted file mode 100644 index f4e8a18a..00000000 --- a/flowctl/crates/flowctl-cli/src/commands/db_shim.rs +++ /dev/null @@ -1,492 +0,0 @@ -//! Sync shim over `flowctl-db` (async libSQL) providing the same API -//! surface as the deprecated `flowctl-db` (rusqlite) crate. -//! -//! Every sync method spins up a per-call `tokio::runtime::Builder:: -//! new_current_thread` runtime, which is cheap for CLI command invocation. -//! The shim exists so the many sync CLI call sites can stay as-is while -//! the underlying storage is async libSQL. -//! -//! This module is the canonical CLI entry point: `crate::commands::db_shim -//! as flowctl_db` (glob-style) is the migration pattern. Do not add -//! long-lived futures or background tasks here. 
- -#![allow(dead_code)] - -use std::path::{Path, PathBuf}; - -pub use flowctl_db::{DbError, GapRow, LockEntry, LockMode, ReindexResult}; -pub use flowctl_db::metrics::{ - Bottleneck, DoraMetrics, EpicStats, Summary, TokenBreakdown, WeeklyTrend, -}; - -/// Wrapped libSQL connection. Produced by [`open`]; passed by reference to -/// the repos mirroring the old rusqlite API. -#[derive(Clone)] -pub struct Connection { - conn: libsql::Connection, -} - -impl Connection { - fn inner(&self) -> libsql::Connection { - self.conn.clone() - } - - /// Public accessor for modules that need the raw libsql connection - /// (e.g. skill commands that call async repos directly). - pub fn inner_conn(&self) -> libsql::Connection { - self.conn.clone() - } -} - -fn block_on(fut: F) -> F::Output { - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("failed to create tokio runtime") - .block_on(fut) -} - -/// Public block_on for command modules that call async repos directly. -pub fn block_on_pub(fut: F) -> F::Output { - block_on(fut) -} - -// ── Pool functions ────────────────────────────────────────────────── - -pub fn resolve_state_dir(working_dir: &Path) -> Result { - flowctl_db::resolve_state_dir(working_dir) -} - -pub fn resolve_db_path(working_dir: &Path) -> Result { - flowctl_db::resolve_db_path(working_dir) -} - -pub fn open(working_dir: &Path) -> Result { - block_on(async { - let db = flowctl_db::open_async(working_dir).await?; - let conn = db.connect()?; - // Leak the Database handle to keep it alive for the process lifetime. - // (libsql Database drop closes the file.) - std::mem::forget(db); - Ok(Connection { conn }) - }) -} - -/// Open DB connection with hard error on failure (DB must be available). -/// This is the preferred entry point — all CLI code should use this -/// (DB is the sole source of truth, no fallback path). 
-pub fn require_db() -> Result { - let cwd = std::env::current_dir() - .map_err(|e| DbError::StateDir(format!("cannot get current dir: {e}")))?; - open(&cwd) -} - -pub fn cleanup(conn: &Connection) -> Result { - block_on(flowctl_db::cleanup(&conn.inner())) -} - -/// Get the maximum epic number from DB. -pub fn max_epic_num(conn: &Connection) -> Result { - block_on(flowctl_db::max_epic_num(&conn.inner())) -} - -/// Get the maximum task number for an epic from DB. -pub fn max_task_num(conn: &Connection, epic_id: &str) -> Result { - block_on(flowctl_db::max_task_num(&conn.inner(), epic_id)) -} - -pub fn reindex( - conn: &Connection, - flow_dir: &Path, - state_dir: Option<&Path>, -) -> Result { - block_on(flowctl_db::reindex(&conn.inner(), flow_dir, state_dir)) -} - -// ── Epic repository ──────────────────────────────────────────────── - -pub struct EpicRepo(libsql::Connection); - -impl EpicRepo { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - pub fn get(&self, id: &str) -> Result { - block_on(flowctl_db::EpicRepo::new(self.0.clone()).get(id)) - } - - pub fn get_with_body( - &self, - id: &str, - ) -> Result<(flowctl_core::types::Epic, String), DbError> { - block_on(flowctl_db::EpicRepo::new(self.0.clone()).get_with_body(id)) - } - - pub fn list( - &self, - status: Option<&str>, - ) -> Result, DbError> { - block_on(flowctl_db::EpicRepo::new(self.0.clone()).list(status)) - } - - pub fn upsert(&self, epic: &flowctl_core::types::Epic) -> Result<(), DbError> { - block_on(flowctl_db::EpicRepo::new(self.0.clone()).upsert(epic)) - } - - pub fn upsert_with_body( - &self, - epic: &flowctl_core::types::Epic, - body: &str, - ) -> Result<(), DbError> { - block_on(flowctl_db::EpicRepo::new(self.0.clone()).upsert_with_body(epic, body)) - } - - pub fn update_status( - &self, - id: &str, - status: flowctl_core::types::EpicStatus, - ) -> Result<(), DbError> { - block_on(flowctl_db::EpicRepo::new(self.0.clone()).update_status(id, status)) - } -} - -// ── Task 
repository ──────────────────────────────────────────────── - -pub struct TaskRepo(libsql::Connection); - -impl TaskRepo { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - pub fn get(&self, id: &str) -> Result { - block_on(flowctl_db::TaskRepo::new(self.0.clone()).get(id)) - } - - pub fn get_with_body( - &self, - id: &str, - ) -> Result<(flowctl_core::types::Task, String), DbError> { - block_on(flowctl_db::TaskRepo::new(self.0.clone()).get_with_body(id)) - } - - pub fn list_by_epic( - &self, - epic_id: &str, - ) -> Result, DbError> { - block_on(flowctl_db::TaskRepo::new(self.0.clone()).list_by_epic(epic_id)) - } - - pub fn list_all( - &self, - status: Option<&str>, - domain: Option<&str>, - ) -> Result, DbError> { - block_on(flowctl_db::TaskRepo::new(self.0.clone()).list_all(status, domain)) - } - - pub fn upsert(&self, task: &flowctl_core::types::Task) -> Result<(), DbError> { - block_on(flowctl_db::TaskRepo::new(self.0.clone()).upsert(task)) - } - - pub fn upsert_with_body( - &self, - task: &flowctl_core::types::Task, - body: &str, - ) -> Result<(), DbError> { - block_on(flowctl_db::TaskRepo::new(self.0.clone()).upsert_with_body(task, body)) - } - - pub fn update_status( - &self, - id: &str, - status: flowctl_core::state_machine::Status, - ) -> Result<(), DbError> { - block_on(flowctl_db::TaskRepo::new(self.0.clone()).update_status(id, status)) - } -} - -// ── Dep repository ───────────────────────────────────────────────── - -pub struct DepRepo(libsql::Connection); - -impl DepRepo { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - pub fn add_task_dep(&self, task_id: &str, depends_on: &str) -> Result<(), DbError> { - block_on( - flowctl_db::DepRepo::new(self.0.clone()).add_task_dep(task_id, depends_on), - ) - } - - pub fn remove_task_dep(&self, task_id: &str, depends_on: &str) -> Result<(), DbError> { - block_on( - flowctl_db::DepRepo::new(self.0.clone()).remove_task_dep(task_id, depends_on), - ) - } - - pub fn 
list_task_deps(&self, task_id: &str) -> Result, DbError> { - block_on(flowctl_db::DepRepo::new(self.0.clone()).list_task_deps(task_id)) - } - - pub fn add_epic_dep(&self, epic_id: &str, depends_on: &str) -> Result<(), DbError> { - block_on( - flowctl_db::DepRepo::new(self.0.clone()).add_epic_dep(epic_id, depends_on), - ) - } - - pub fn remove_epic_dep(&self, epic_id: &str, depends_on: &str) -> Result<(), DbError> { - block_on( - flowctl_db::DepRepo::new(self.0.clone()).remove_epic_dep(epic_id, depends_on), - ) - } - - pub fn list_epic_deps(&self, epic_id: &str) -> Result, DbError> { - block_on(flowctl_db::DepRepo::new(self.0.clone()).list_epic_deps(epic_id)) - } - - /// Replace all deps for a task (delete-all + insert each). - pub fn replace_task_deps(&self, task_id: &str, deps: &[String]) -> Result<(), DbError> { - let inner = self.0.clone(); - block_on(async move { - inner - .execute( - "DELETE FROM task_deps WHERE task_id = ?1", - libsql::params![task_id.to_string()], - ) - .await?; - for d in deps { - inner - .execute( - "INSERT INTO task_deps (task_id, depends_on) VALUES (?1, ?2)", - libsql::params![task_id.to_string(), d.to_string()], - ) - .await?; - } - Ok::<(), DbError>(()) - }) - } -} - -// ── Runtime repository ───────────────────────────────────────────── - -pub struct RuntimeRepo(libsql::Connection); - -impl RuntimeRepo { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - pub fn get( - &self, - task_id: &str, - ) -> Result, DbError> { - block_on(flowctl_db::RuntimeRepo::new(self.0.clone()).get(task_id)) - } - - pub fn upsert( - &self, - state: &flowctl_core::types::RuntimeState, - ) -> Result<(), DbError> { - block_on(flowctl_db::RuntimeRepo::new(self.0.clone()).upsert(state)) - } -} - -// ── File lock repository ─────────────────────────────────────────── - -pub struct FileLockRepo(libsql::Connection); - -impl FileLockRepo { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - pub fn acquire(&self, file_path: 
&str, task_id: &str, mode: &LockMode) -> Result<(), DbError> { - block_on( - flowctl_db::FileLockRepo::new(self.0.clone()).acquire(file_path, task_id, mode), - ) - } - - pub fn release_for_task(&self, task_id: &str) -> Result { - block_on(flowctl_db::FileLockRepo::new(self.0.clone()).release_for_task(task_id)) - } - - pub fn release_all(&self) -> Result { - block_on(flowctl_db::FileLockRepo::new(self.0.clone()).release_all()) - } - - pub fn check(&self, file_path: &str) -> Result, DbError> { - block_on(flowctl_db::FileLockRepo::new(self.0.clone()).check(file_path)) - } - - pub fn check_locks(&self, file_path: &str) -> Result, DbError> { - block_on(flowctl_db::FileLockRepo::new(self.0.clone()).check_locks(file_path)) - } - - pub fn heartbeat(&self, task_id: &str) -> Result { - block_on(flowctl_db::FileLockRepo::new(self.0.clone()).heartbeat(task_id)) - } - - /// List all active locks: (file_path, task_id, locked_at, lock_mode). - pub fn list_all(&self) -> Result, DbError> { - let inner = self.0.clone(); - block_on(async move { - let mut rows = inner - .query( - "SELECT file_path, task_id, locked_at, lock_mode FROM file_locks ORDER BY file_path", - (), - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(( - row.get::(0)?, - row.get::(1)?, - row.get::(2)?, - row.get::(3)?, - )); - } - Ok(out) - }) - } -} - -// ── Event repository ────────────────────────────────────────────── - -pub struct EventRepoSync(libsql::Connection); - -impl EventRepoSync { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - /// Return the inner async EventRepo for use with ChangesApplier. 
- pub fn as_async(&self) -> flowctl_db::EventRepo { - flowctl_db::EventRepo::new(self.0.clone()) - } -} - -// ── Phase progress repository ────────────────────────────────────── - -pub struct PhaseProgressRepo(libsql::Connection); - -impl PhaseProgressRepo { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - pub fn get_completed(&self, task_id: &str) -> Result, DbError> { - block_on( - flowctl_db::PhaseProgressRepo::new(self.0.clone()).get_completed(task_id), - ) - } - - pub fn mark_done(&self, task_id: &str, phase: &str) -> Result<(), DbError> { - block_on( - flowctl_db::PhaseProgressRepo::new(self.0.clone()).mark_done(task_id, phase), - ) - } -} - -// ── Gap repository ──────────────────────────────────────────────── - -pub struct GapRepo(libsql::Connection); - -impl GapRepo { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - pub fn add( - &self, - epic_id: &str, - capability: &str, - priority: &str, - source: Option<&str>, - task_id: Option<&str>, - ) -> Result { - block_on( - flowctl_db::GapRepo::new(self.0.clone()) - .add(epic_id, capability, priority, source, task_id), - ) - } - - pub fn list( - &self, - epic_id: &str, - status: Option<&str>, - ) -> Result, DbError> { - block_on( - flowctl_db::GapRepo::new(self.0.clone()) - .list(epic_id, status), - ) - } - - pub fn remove(&self, id: i64) -> Result<(), DbError> { - block_on(flowctl_db::GapRepo::new(self.0.clone()).remove(id)) - } - - pub fn remove_all(&self, epic_id: &str) -> Result { - block_on(flowctl_db::GapRepo::new(self.0.clone()).remove_all(epic_id)) - } - - pub fn resolve(&self, id: i64, evidence: &str) -> Result<(), DbError> { - block_on(flowctl_db::GapRepo::new(self.0.clone()).resolve(id, evidence)) - } - - pub fn resolve_by_capability( - &self, - epic_id: &str, - capability: &str, - evidence: &str, - ) -> Result<(), DbError> { - block_on( - flowctl_db::GapRepo::new(self.0.clone()) - .resolve_by_capability(epic_id, capability, evidence), - ) - } -} - -// ── 
Stats query ──────────────────────────────────────────────────── - -pub struct StatsQuery(libsql::Connection); - -impl StatsQuery { - pub fn new(conn: &Connection) -> Self { - Self(conn.inner()) - } - - pub fn summary(&self) -> Result { - block_on(flowctl_db::StatsQuery::new(self.0.clone()).summary()) - } - - pub fn per_epic(&self, epic_id: Option<&str>) -> Result, DbError> { - block_on(flowctl_db::StatsQuery::new(self.0.clone()).epic_stats(epic_id)) - } - - pub fn weekly_trends(&self, weeks: u32) -> Result, DbError> { - block_on(flowctl_db::StatsQuery::new(self.0.clone()).weekly_trends(weeks)) - } - - pub fn token_breakdown( - &self, - epic_id: Option<&str>, - ) -> Result, DbError> { - block_on(flowctl_db::StatsQuery::new(self.0.clone()).token_breakdown(epic_id)) - } - - pub fn bottlenecks(&self, limit: usize) -> Result, DbError> { - block_on(flowctl_db::StatsQuery::new(self.0.clone()).bottlenecks(limit)) - } - - pub fn dora_metrics(&self) -> Result { - block_on(flowctl_db::StatsQuery::new(self.0.clone()).dora_metrics()) - } - - pub fn generate_monthly_rollups(&self) -> Result { - block_on(flowctl_db::StatsQuery::new(self.0.clone()).generate_monthly_rollups()) - } -} diff --git a/flowctl/crates/flowctl-cli/src/commands/gap.rs b/flowctl/crates/flowctl-cli/src/commands/gap.rs index df535172..9d7d7a57 100644 --- a/flowctl/crates/flowctl-cli/src/commands/gap.rs +++ b/flowctl/crates/flowctl-cli/src/commands/gap.rs @@ -1,7 +1,7 @@ //! Gap registry commands: gap add, list, resolve, check. //! //! Gaps track requirement deficiencies in an epic. They are stored in -//! the DB `gaps` table (sole source of truth). Blocking gaps +//! JSON files under `gaps/.json`. Blocking gaps //! (required/important) prevent epic closure. 
use clap::Subcommand; @@ -10,6 +10,7 @@ use serde_json::json; use crate::output::{error_exit, json_output, pretty_output}; use flowctl_core::id::is_epic_id; +use flowctl_db::{FlowStore, GapEntry}; use super::helpers::get_flow_dir; @@ -76,8 +77,8 @@ pub fn dispatch(cmd: &GapCmd, json: bool) { capability, priority, source, - task, - } => cmd_gap_add(json, epic, capability, priority, source, task.as_deref()), + task: _, + } => cmd_gap_add(json, epic, capability, priority, source), GapCmd::List { epic, status } => cmd_gap_list(json, epic, status.as_deref()), GapCmd::Resolve { epic, @@ -91,13 +92,7 @@ pub fn dispatch(cmd: &GapCmd, json: bool) { // ── Helpers ──────────────────────────────────────────────────────── -/// Open DB connection (hard error if unavailable). -fn require_db() -> crate::commands::db_shim::Connection { - crate::commands::db_shim::require_db() - .unwrap_or_else(|e| error_exit(&format!("DB required: {e}"))) -} - -/// Verify .flow/ exists, epic ID is valid, and epic exists (DB or JSON). +/// Verify .flow/ exists, epic ID is valid, and epic exists. fn validate_epic(_json: bool, epic_id: &str) { let flow_dir = get_flow_dir(); if !flow_dir.exists() { @@ -106,11 +101,8 @@ fn validate_epic(_json: bool, epic_id: &str) { if !is_epic_id(epic_id) { error_exit(&format!("Invalid epic ID: {}", epic_id)); } - // Check DB first, fall back to JSON file existence. - // Epic may exist only in JSON if DB upsert hasn't run yet. 
- let conn = require_db(); - let repo = crate::commands::db_shim::EpicRepo::new(&conn); - if repo.get(epic_id).is_ok() { + // Check JSON file existence + if flowctl_core::json_store::epic_read(&flow_dir, epic_id).is_ok() { return; } let json_path = flow_dir.join("epics").join(format!("{epic_id}.json")); @@ -120,6 +112,10 @@ fn validate_epic(_json: bool, epic_id: &str) { error_exit(&format!("Epic not found: {}", epic_id)); } +fn gap_store() -> FlowStore { + FlowStore::new(get_flow_dir()) +} + // ── Commands ─────────────────────────────────────────────────────── fn cmd_gap_add( @@ -128,96 +124,102 @@ fn cmd_gap_add( capability: &str, priority: &str, source: &str, - task: Option<&str>, ) { validate_epic(json_mode, epic_id); - let conn = require_db(); - let gap_repo = crate::commands::db_shim::GapRepo::new(&conn); + let store = gap_store(); + let gap_store = store.gaps(); + + let mut gaps = gap_store.read(epic_id).unwrap_or_default(); // Check for existing gap with same capability (idempotent) - if let Ok(existing) = gap_repo.list(epic_id, None) { - let cap_lower = capability.trim().to_lowercase(); - if let Some(gap) = existing.iter().find(|g| g.capability.trim().to_lowercase() == cap_lower) { - if json_mode { - json_output(json!({ - "id": gap.id, - "created": false, - "gap": { - "id": gap.id, - "capability": gap.capability, - "priority": gap.priority, - "status": gap.status, - "source": gap.source, - "task": gap.task_id, - }, - "message": format!("Gap already exists: {}", gap.id), - })); - } else { - println!( - "Gap already exists: {} \u{2014} {}", - gap.id, gap.capability - ); - } - return; + let cap_lower = capability.trim().to_lowercase(); + if let Some(existing) = gaps.iter().find(|g| g.capability.trim().to_lowercase() == cap_lower) { + let status = if existing.resolved { "resolved" } else { "open" }; + if json_mode { + json_output(json!({ + "id": existing.id, + "created": false, + "gap": { + "id": existing.id, + "capability": existing.capability, + "priority": 
existing.priority, + "status": status, + "source": existing.source, + }, + "message": format!("Gap already exists: {}", existing.id), + })); + } else { + println!( + "Gap already exists: {} \u{2014} {}", + existing.id, existing.capability + ); } + return; } - match gap_repo.add(epic_id, capability.trim(), priority, Some(source), task) { - Ok(gap_id) => { - if json_mode { - json_output(json!({ - "id": gap_id, - "created": true, - "gap": { - "id": gap_id, - "capability": capability.trim(), - "priority": priority, - "status": "open", - "source": source, - "task": task, - }, - "message": format!("Gap {} added to {}", gap_id, epic_id), - })); - } else { - println!("Gap {} added: [{}] {}", gap_id, priority, capability.trim()); - } - } - Err(e) => { - error_exit(&format!("Failed to add gap: {e}")); - } + let next_id = gaps.iter().map(|g| g.id).max().unwrap_or(0) + 1; + gaps.push(GapEntry { + id: next_id, + capability: capability.trim().to_string(), + priority: priority.to_string(), + source: source.to_string(), + resolved: false, + }); + + if let Err(e) = gap_store.write(epic_id, &gaps) { + error_exit(&format!("Failed to add gap: {e}")); + } + + if json_mode { + json_output(json!({ + "id": next_id, + "created": true, + "gap": { + "id": next_id, + "capability": capability.trim(), + "priority": priority, + "status": "open", + "source": source, + }, + "message": format!("Gap {} added to {}", next_id, epic_id), + })); + } else { + println!("Gap {} added: [{}] {}", next_id, priority, capability.trim()); } } fn cmd_gap_list(json_mode: bool, epic_id: &str, status_filter: Option<&str>) { validate_epic(json_mode, epic_id); - let conn = require_db(); - let gap_repo = crate::commands::db_shim::GapRepo::new(&conn); - - let gaps = gap_repo.list(epic_id, status_filter).unwrap_or_default(); + let store = gap_store(); + let gaps = store.gaps().read(epic_id).unwrap_or_default(); + + let filtered: Vec<&GapEntry> = gaps.iter().filter(|g| { + match status_filter { + Some("open") => 
!g.resolved, + Some("resolved") => g.resolved, + _ => true, + } + }).collect(); if json_mode { - let gap_values: Vec = gaps + let gap_values: Vec = filtered .iter() .map(|g| { json!({ "id": g.id, "capability": g.capability, "priority": g.priority, - "status": g.status, + "status": if g.resolved { "resolved" } else { "open" }, "source": g.source, - "task": g.task_id, - "added_at": g.created_at, - "resolved_at": g.resolved_at, - "evidence": g.evidence, }) }) .collect(); json_output(json!({ "epic": epic_id, - "count": gaps.len(), + "count": filtered.len(), "gaps": gap_values, })); - } else if gaps.is_empty() { + } else if filtered.is_empty() { let suffix = status_filter .map(|s| format!(" (status={})", s)) .unwrap_or_default(); @@ -226,21 +228,9 @@ fn cmd_gap_list(json_mode: bool, epic_id: &str, status_filter: Option<&str>) { } else { use std::fmt::Write as _; let mut buf = String::new(); - for g in &gaps { - let marker = if g.status == "resolved" { - "\u{2713}" - } else { - "\u{2717}" - }; - writeln!( - buf, - " {} {} [{}] {}", - marker, - g.id, - g.priority, - g.capability, - ) - .ok(); + for g in &filtered { + let marker = if g.resolved { "\u{2713}" } else { "\u{2717}" }; + writeln!(buf, " {} {} [{}] {}", marker, g.id, g.priority, g.capability).ok(); } pretty_output("gap", &buf); } @@ -251,22 +241,28 @@ fn cmd_gap_resolve( epic_id: &str, capability: Option<&str>, gap_id_direct: Option<&str>, - evidence: &str, + _evidence: &str, ) { validate_epic(json_mode, epic_id); - let conn = require_db(); - let gap_repo = crate::commands::db_shim::GapRepo::new(&conn); + let store = gap_store(); + let gap_st = store.gaps(); + let mut gaps = gap_st.read(epic_id).unwrap_or_default(); if let Some(direct_id) = gap_id_direct { - // Resolve by numeric ID - let gap_id: i64 = direct_id + let gap_id: u32 = direct_id .parse() .unwrap_or_else(|_| error_exit(&format!("Invalid gap ID: {}", direct_id))); - if let Err(e) = gap_repo.resolve(gap_id, evidence) { - error_exit(&format!("Failed to 
resolve gap {}: {e}", gap_id)); + if let Some(g) = gaps.iter_mut().find(|g| g.id == gap_id) { + g.resolved = true; + } else { + error_exit(&format!("Gap {} not found", gap_id)); } + gap_st.write(epic_id, &gaps).unwrap_or_else(|e| { + error_exit(&format!("Failed to resolve gap: {e}")); + }); + if json_mode { json_output(json!({ "id": gap_id, @@ -274,14 +270,21 @@ fn cmd_gap_resolve( "message": format!("Gap {} resolved", gap_id), })); } else { - println!("Gap {} resolved: {}", gap_id, evidence); + println!("Gap {} resolved", gap_id); } } else if let Some(cap) = capability { - // Resolve by capability name - if let Err(e) = gap_repo.resolve_by_capability(epic_id, cap, evidence) { - error_exit(&format!("Failed to resolve gap by capability '{}': {e}", cap)); + let cap_lower = cap.trim().to_lowercase(); + let found = gaps.iter_mut().find(|g| g.capability.trim().to_lowercase() == cap_lower); + if let Some(g) = found { + g.resolved = true; + } else { + error_exit(&format!("Gap for capability '{}' not found", cap)); } + gap_st.write(epic_id, &gaps).unwrap_or_else(|e| { + error_exit(&format!("Failed to resolve gap: {e}")); + }); + if json_mode { json_output(json!({ "capability": cap, @@ -289,7 +292,7 @@ fn cmd_gap_resolve( "message": format!("Gap for '{}' resolved", cap), })); } else { - println!("Gap for '{}' resolved: {}", cap, evidence); + println!("Gap for '{}' resolved", cap); } } else { error_exit("Either --capability or --id is required"); @@ -298,24 +301,22 @@ fn cmd_gap_resolve( fn cmd_gap_check(json_mode: bool, epic_id: &str) { validate_epic(json_mode, epic_id); - let conn = require_db(); - let gap_repo = crate::commands::db_shim::GapRepo::new(&conn); + let store = gap_store(); + let all_gaps = store.gaps().read(epic_id).unwrap_or_default(); - let all_gaps = gap_repo.list(epic_id, None).unwrap_or_default(); - - let open_blocking: Vec<&crate::commands::db_shim::GapRow> = all_gaps + let open_blocking: Vec<&GapEntry> = all_gaps .iter() - .filter(|g| g.status == "open" 
&& GAP_BLOCKING_PRIORITIES.contains(&g.priority.as_str())) + .filter(|g| !g.resolved && GAP_BLOCKING_PRIORITIES.contains(&g.priority.as_str())) .collect(); - let open_non_blocking: Vec<&crate::commands::db_shim::GapRow> = all_gaps + let open_non_blocking: Vec<&GapEntry> = all_gaps .iter() - .filter(|g| g.status == "open" && !GAP_BLOCKING_PRIORITIES.contains(&g.priority.as_str())) + .filter(|g| !g.resolved && !GAP_BLOCKING_PRIORITIES.contains(&g.priority.as_str())) .collect(); - let resolved: Vec<&crate::commands::db_shim::GapRow> = all_gaps + let resolved: Vec<&GapEntry> = all_gaps .iter() - .filter(|g| g.status == "resolved") + .filter(|g| g.resolved) .collect(); let gate = if open_blocking.is_empty() { @@ -325,14 +326,14 @@ fn cmd_gap_check(json_mode: bool, epic_id: &str) { }; if json_mode { - let to_json = |gaps: &[&crate::commands::db_shim::GapRow]| -> Vec { + let to_json = |gaps: &[&GapEntry]| -> Vec { gaps.iter() .map(|g| { json!({ "id": g.id, "capability": g.capability, "priority": g.priority, - "status": g.status, + "status": if g.resolved { "resolved" } else { "open" }, }) }) .collect() @@ -359,11 +360,7 @@ fn cmd_gap_check(json_mode: bool, epic_id: &str) { open_blocking.len() ); for g in &open_blocking { - println!( - " \u{2717} [{}] {}", - g.priority, - g.capability, - ); + println!(" \u{2717} [{}] {}", g.priority, g.capability); } } diff --git a/flowctl/crates/flowctl-cli/src/commands/helpers.rs b/flowctl/crates/flowctl-cli/src/commands/helpers.rs index 6716f38b..26d9b063 100644 --- a/flowctl/crates/flowctl-cli/src/commands/helpers.rs +++ b/flowctl/crates/flowctl-cli/src/commands/helpers.rs @@ -145,12 +145,10 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> Result<(), std::io::Error> { /// Apply a `Changes` batch via the service-layer `ChangesApplier`. /// -/// This opens the libSQL event DB, spins a short-lived tokio runtime, and -/// applies all mutations (JSON store writes + event logging + DB upserts) in order. 
+/// Applies all mutations (JSON store writes + event logging) in order. /// Returns the number of mutations applied. Calls `error_exit` on failure. pub fn apply_changes(flow_dir: &Path, changes: &flowctl_core::changes::Changes) -> usize { use crate::output::error_exit; - use flowctl_core::changes::Mutation; use flowctl_service::changes::ChangesApplier; if changes.is_empty() { @@ -159,57 +157,13 @@ pub fn apply_changes(flow_dir: &Path, changes: &flowctl_core::changes::Changes) let actor = resolve_actor(); - // Open DB for event logging + entity upserts - let cwd = env::current_dir().unwrap_or_else(|_| PathBuf::from(".")); - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap_or_else(|e| error_exit(&format!("tokio runtime: {e}"))); + let applier = ChangesApplier::new(flow_dir) + .with_actor(&actor); - let applied = rt.block_on(async { - let db = flowctl_db::open_async(&cwd).await - .unwrap_or_else(|e| error_exit(&format!("Failed to open DB: {e}"))); - let conn = db.connect() - .unwrap_or_else(|e| error_exit(&format!("Failed to connect to DB: {e}"))); - let event_repo = flowctl_db::EventRepo::new(conn); + let result = applier.apply(changes) + .unwrap_or_else(|e| error_exit(&format!("Failed to apply changes: {e}"))); - let applier = ChangesApplier::new(flow_dir, &event_repo) - .with_actor(&actor); - - let result = applier.apply(changes).await - .unwrap_or_else(|e| error_exit(&format!("Failed to apply changes: {e}"))); - - // Dual-write: upsert entities into DB tables so commands like `gap add` - // (which validate via EpicRepo::get) can find them. 
- let conn2 = db.connect() - .unwrap_or_else(|e| error_exit(&format!("Failed to connect to DB: {e}"))); - let epic_repo = flowctl_db::EpicRepo::new(conn2.clone()); - let task_repo = flowctl_db::TaskRepo::new(conn2); - - for mutation in &changes.mutations { - match mutation { - Mutation::CreateEpic { epic } | Mutation::UpdateEpic { epic } => { - let _ = epic_repo.upsert(epic).await; - } - Mutation::RemoveEpic { id } => { - let _ = epic_repo.delete(id).await; - } - Mutation::CreateTask { task } | Mutation::UpdateTask { task } => { - let _ = task_repo.upsert(task).await; - } - Mutation::RemoveTask { id } => { - let _ = task_repo.delete(id).await; - } - _ => {} - } - } - - // Leak the DB handle to keep it alive (same pattern as db_shim) - std::mem::forget(db); - result.applied - }); - - applied + result.applied } /// Handle dry-run or real apply of a `Changes` batch. diff --git a/flowctl/crates/flowctl-cli/src/commands/log.rs b/flowctl/crates/flowctl-cli/src/commands/log.rs index 5d89405f..9d270628 100644 --- a/flowctl/crates/flowctl-cli/src/commands/log.rs +++ b/flowctl/crates/flowctl-cli/src/commands/log.rs @@ -1,16 +1,17 @@ //! Decision logging commands: log decision, log decisions. //! -//! Records workflow auto-decisions in the events table (event_type = "decision") -//! for post-hoc traceability. Skills call `flowctl log decision` at each auto- -//! decision point; `flowctl log decisions` queries the stored decisions. +//! Records workflow auto-decisions in the JSONL event log for post-hoc +//! traceability. Skills call `flowctl log decision` at each auto-decision +//! point; `flowctl log decisions` queries the stored decisions. use clap::Subcommand; use serde_json::json; use crate::output::{error_exit, json_output, pretty_output}; -use super::db_shim; use super::helpers::get_flow_dir; +use flowctl_db::FlowStore; + #[derive(Subcommand, Debug)] pub enum LogCmd { /// Record a workflow decision. 
@@ -64,31 +65,27 @@ fn cmd_log_decision( task_id: Option<&str>, ) { let flow_dir = get_flow_dir(); - let conn = db_shim::open(&flow_dir).unwrap_or_else(|e| { - error_exit(&format!("Cannot open DB: {e}")); - }); + let store = FlowStore::new(flow_dir); + + let epic = epic_id.unwrap_or("_global"); - let payload = json!({ + let event = json!({ + "stream_id": format!("decision:{epic}"), + "type": "decision", + "epic_id": epic, + "task_id": task_id, "key": key, "value": value, "reason": reason, - }) - .to_string(); - - let epic = epic_id.unwrap_or("_global"); + "timestamp": chrono::Utc::now().to_rfc3339(), + }); - let repo = flowctl_db::repo::EventRepo::new(conn.inner_conn()); - let id = db_shim::block_on_pub(async { - repo.insert(epic, task_id, "decision", None, Some(&payload), None) - .await - }) - .unwrap_or_else(|e| { + if let Err(e) = store.events().append(&event.to_string()) { error_exit(&format!("Failed to log decision: {e}")); - }); + } if json_mode { json_output(json!({ - "id": id, "event_type": "decision", "key": key, "value": value, @@ -103,70 +100,66 @@ fn cmd_log_decision( fn cmd_log_decisions(json_mode: bool, epic_id: Option<&str>, limit: usize) { let flow_dir = get_flow_dir(); - let conn = db_shim::open(&flow_dir).unwrap_or_else(|e| { - error_exit(&format!("Cannot open DB: {e}")); + let store = FlowStore::new(flow_dir); + + let all_lines = store.events().read_all().unwrap_or_else(|e| { + error_exit(&format!("Failed to query decisions: {e}")); }); - let repo = flowctl_db::repo::EventRepo::new(conn.inner_conn()); - let events = if let Some(epic) = epic_id { - db_shim::block_on_pub(async { repo.list_by_epic(epic, limit * 2).await }) - .unwrap_or_else(|e| { - error_exit(&format!("Failed to query decisions: {e}")); - }) - .into_iter() - .filter(|e| e.event_type == "decision") - .take(limit) - .collect::>() - } else { - db_shim::block_on_pub(async { repo.list_by_type("decision", limit).await }) - .unwrap_or_else(|e| { - error_exit(&format!("Failed to query 
decisions: {e}")); - }) - }; + // Filter for decision events, optionally by epic + let mut decisions: Vec = Vec::new(); + for line in all_lines.iter().rev() { + if let Ok(val) = serde_json::from_str::(line) { + let event_type = val.get("type").and_then(|v| v.as_str()).unwrap_or(""); + if event_type != "decision" { + continue; + } + if let Some(epic) = epic_id { + let eid = val.get("epic_id").and_then(|v| v.as_str()).unwrap_or(""); + if eid != epic { + continue; + } + } + decisions.push(val); + if decisions.len() >= limit { + break; + } + } + } if json_mode { - let items: Vec<_> = events + let items: Vec = decisions .iter() .map(|e| { - let payload: serde_json::Value = e - .payload - .as_deref() - .and_then(|p| serde_json::from_str(p).ok()) - .unwrap_or(json!(null)); json!({ - "id": e.id, - "timestamp": e.timestamp, - "epic_id": e.epic_id, - "task_id": e.task_id, - "key": payload["key"], - "value": payload["value"], - "reason": payload["reason"], + "timestamp": e.get("timestamp").and_then(|v| v.as_str()).unwrap_or(""), + "epic_id": e.get("epic_id").and_then(|v| v.as_str()).unwrap_or(""), + "task_id": e.get("task_id"), + "key": e.get("key").and_then(|v| v.as_str()).unwrap_or(""), + "value": e.get("value").and_then(|v| v.as_str()).unwrap_or(""), + "reason": e.get("reason").and_then(|v| v.as_str()).unwrap_or(""), }) }) .collect(); json_output(json!({ "decisions": items, "count": items.len() })); } else { - if events.is_empty() { + if decisions.is_empty() { pretty_output("log", "No decisions recorded."); return; } - pretty_output("log", &format!("Decisions ({}):", events.len())); - for e in &events { - let payload: serde_json::Value = e - .payload - .as_deref() - .and_then(|p| serde_json::from_str(p).ok()) - .unwrap_or(json!(null)); + pretty_output("log", &format!("Decisions ({}):", decisions.len())); + for e in &decisions { + let ts = e.get("timestamp").and_then(|v| v.as_str()).unwrap_or("?"); + let key = e.get("key").and_then(|v| v.as_str()).unwrap_or("?"); + let 
value = e.get("value").and_then(|v| v.as_str()).unwrap_or("?"); + let reason = e.get("reason").and_then(|v| v.as_str()).unwrap_or("?"); + let epic = e.get("epic_id").and_then(|v| v.as_str()).unwrap_or("?"); + let task = e.get("task_id").and_then(|v| v.as_str()).unwrap_or("-"); pretty_output( "log", &format!( " [{}] {}={} — {} (epic: {}, task: {})", - &e.timestamp[..19], - payload["key"].as_str().unwrap_or("?"), - payload["value"].as_str().unwrap_or("?"), - payload["reason"].as_str().unwrap_or("?"), - e.epic_id, - e.task_id.as_deref().unwrap_or("-"), + &ts[..std::cmp::min(19, ts.len())], key, value, reason, epic, task, ), ); } diff --git a/flowctl/crates/flowctl-cli/src/commands/mod.rs b/flowctl/crates/flowctl-cli/src/commands/mod.rs index dfc3c07c..cfb87922 100644 --- a/flowctl/crates/flowctl-cli/src/commands/mod.rs +++ b/flowctl/crates/flowctl-cli/src/commands/mod.rs @@ -1,7 +1,6 @@ //! Command modules — one file per command group. pub mod helpers; -pub mod db_shim; pub mod admin; pub mod approval; pub mod checkpoint; diff --git a/flowctl/crates/flowctl-cli/src/commands/query.rs b/flowctl/crates/flowctl-cli/src/commands/query.rs index dd6dd0ea..5b78c571 100644 --- a/flowctl/crates/flowctl-cli/src/commands/query.rs +++ b/flowctl/crates/flowctl-cli/src/commands/query.rs @@ -113,13 +113,6 @@ fn task_list_json(task: &Task) -> serde_json::Value { }) } -// ── DB bridge for file locks (stays in DB) ───────────────────────── - -fn require_db() -> crate::commands::db_shim::Connection { - crate::commands::db_shim::require_db() - .unwrap_or_else(|e| error_exit(&format!("Cannot open database: {}", e))) -} - // ── JSON file data access ────────────────────────────────────────── /// Get a single epic by ID from JSON files. 
@@ -599,30 +592,25 @@ pub fn cmd_files(json_mode: bool, epic: String) { pub fn cmd_lock(json: bool, task: String, files: String, mode: String) { - let _flow_dir = ensure_flow_exists(); + let flow_dir = ensure_flow_exists(); let file_list: Vec<&str> = files.split(',').map(str::trim).filter(|s| !s.is_empty()).collect(); if file_list.is_empty() { error_exit("No files specified for locking."); } - let lock_mode = crate::commands::db_shim::LockMode::from_str(&mode) - .unwrap_or_else(|e| error_exit(&format!("Invalid lock mode: {}", e))); - - let conn = require_db(); - let repo = crate::commands::db_shim::FileLockRepo::new(&conn); + let store = flowctl_db::FlowStore::new(flow_dir); + let lock_store = store.locks(); let mut locked = Vec::new(); let mut already_locked = Vec::new(); for file in &file_list { - match repo.acquire(file, &task, &lock_mode) { + match lock_store.acquire(file, &task, &mode) { Ok(()) => locked.push(file.to_string()), - Err(crate::commands::db_shim::DbError::Constraint(msg)) => { - // Already locked — find out by whom - let entries = repo.check_locks(file).ok().unwrap_or_default(); - let owners: Vec = entries.iter().map(|e| format!("{}({})", e.task_id, e.lock_mode.as_str())).collect(); - already_locked.push(json!({"file": file, "owners": owners, "detail": msg})); + Err(flowctl_db::DbError::Constraint(msg)) => { + let holder = lock_store.check(file).ok().flatten().unwrap_or_default(); + already_locked.push(json!({"file": file, "owners": [format!("{}({mode})", holder)], "detail": msg})); } Err(e) => { error_exit(&format!("Failed to lock {}: {}", file, e)); @@ -652,12 +640,12 @@ pub fn cmd_lock(json: bool, task: String, files: String, mode: String) { } pub fn cmd_unlock(json: bool, task: Option, _files: Option, all: bool) { - let _flow_dir = ensure_flow_exists(); - let conn = require_db(); - let repo = crate::commands::db_shim::FileLockRepo::new(&conn); + let flow_dir = ensure_flow_exists(); + let store = flowctl_db::FlowStore::new(flow_dir); + let 
lock_store = store.locks(); if all { - match repo.release_all() { + match lock_store.release_all() { Ok(count) => { if json { json_output(json!({ @@ -680,7 +668,7 @@ pub fn cmd_unlock(json: bool, task: Option, _files: Option, all: } }; - match repo.release_for_task(&task_id) { + match lock_store.release_for_task(&task_id) { Ok(count) => { if json { json_output(json!({ @@ -697,30 +685,25 @@ pub fn cmd_unlock(json: bool, task: Option, _files: Option, all: } pub fn cmd_lock_check(json: bool, file: Option) { - let _flow_dir = ensure_flow_exists(); - let conn = require_db(); - let repo = crate::commands::db_shim::FileLockRepo::new(&conn); + let flow_dir = ensure_flow_exists(); + let store = flowctl_db::FlowStore::new(flow_dir); + let lock_store = store.locks(); match file { Some(f) => { - match repo.check_locks(&f) { - Ok(entries) if !entries.is_empty() => { - let lock_info: Vec = entries.iter().map(|e| json!({ - "task_id": e.task_id, - "mode": e.lock_mode.as_str(), - })).collect(); + match lock_store.check(&f) { + Ok(Some(task_id)) => { if json { json_output(json!({ "file": f, "locked": true, - "locks": lock_info, + "locks": [{"task_id": task_id, "mode": "write"}], })); } else { - let owners: Vec = entries.iter().map(|e| format!("{}({})", e.task_id, e.lock_mode.as_str())).collect(); - println!("{}: locked by {}", f, owners.join(", ")); + println!("{}: locked by {}", f, task_id); } } - Ok(_) => { + Ok(None) => { if json { json_output(json!({ "file": f, @@ -734,18 +717,16 @@ pub fn cmd_lock_check(json: bool, file: Option) { } } None => { - // List all locks - let lock_repo = crate::commands::db_shim::FileLockRepo::new(&conn); - let rows = lock_repo - .list_all() + let entries = lock_store + .list() .unwrap_or_else(|e| { error_exit(&format!("Query failed: {}", e)); }); - let locks: Vec = rows + let locks: Vec = entries .into_iter() - .map(|(file, task_id, locked_at, lock_mode)| json!({ - "file": file, - "task_id": task_id, - "locked_at": locked_at, - "mode": lock_mode, + 
.map(|entry| json!({ + "file": entry.file_path, + "task_id": entry.task_id, + "locked_at": entry.locked_at, + "mode": entry.mode, })) .collect(); @@ -774,21 +755,15 @@ pub fn cmd_lock_check(json: bool, file: Option) { pub fn cmd_heartbeat(json: bool, task: String) { let _flow_dir = ensure_flow_exists(); - let conn = require_db(); - let repo = crate::commands::db_shim::FileLockRepo::new(&conn); - - match repo.heartbeat(&task) { - Ok(count) => { - if json { - json_output(json!({ - "task": task, - "extended": count, - "message": format!("Extended TTL for {} lock(s)", count), - })); - } else { - println!("Extended TTL for {} lock(s) for task {}", count, task); - } - } - Err(e) => error_exit(&format!("Failed to heartbeat: {}", e)), + // Heartbeat is a no-op with file-based locks (no TTL expiry). + // We still report success for protocol compatibility. + if json { + json_output(json!({ + "task": task, + "extended": 0, + "message": "Heartbeat acknowledged (file-based locks have no TTL)", + })); + } else { + println!("Heartbeat acknowledged for task {} (file-based locks have no TTL)", task); } } diff --git a/flowctl/crates/flowctl-cli/src/commands/scout_cache.rs b/flowctl/crates/flowctl-cli/src/commands/scout_cache.rs index 63509517..56ff0570 100644 --- a/flowctl/crates/flowctl-cli/src/commands/scout_cache.rs +++ b/flowctl/crates/flowctl-cli/src/commands/scout_cache.rs @@ -1,11 +1,12 @@ //! Scout cache commands: `flowctl scout-cache get|set|clear`. +//! +//! File-based scout cache stored in `.state/scout-cache/` directory. use clap::Subcommand; use serde_json::json; use crate::output::{error_exit, json_output}; - -use super::db_shim; +use super::helpers::get_flow_dir; #[derive(Subcommand, Debug)] pub enum ScoutCacheCmd { @@ -53,54 +54,40 @@ fn detect_commit(explicit: &Option) -> String { .unwrap_or_else(|| "no-git".to_string()) } +/// Get the cache directory, creating it if needed. 
+fn cache_dir() -> std::path::PathBuf { + let dir = get_flow_dir().join(".state").join("scout-cache"); + let _ = std::fs::create_dir_all(&dir); + dir +} + +/// Sanitize a cache key for use as a filename. +fn key_to_filename(key: &str) -> String { + key.replace([':', '/', '\\'], "_") +} + pub fn dispatch(cmd: &ScoutCacheCmd, json_mode: bool) { match cmd { ScoutCacheCmd::Get { scout_type, commit } => { let c = detect_commit(commit); let key = format!("{scout_type}:{c}"); + let path = cache_dir().join(key_to_filename(&key)); - // Get DB connection outside async (require_db uses its own runtime). - let conn = match db_shim::require_db() { - Ok(c) => c, - Err(_) => { - if json_mode { - json_output(json!({"hit": false, "key": key})); - } else { - println!("miss (db unavailable)"); - } - return; + if path.exists() { + let result = std::fs::read_to_string(&path).unwrap_or_default(); + if json_mode { + let parsed: serde_json::Value = + serde_json::from_str(&result) + .unwrap_or(serde_json::Value::String(result)); + json_output(json!({"hit": true, "key": key, "result": parsed})); + } else { + println!("hit: {}", result); } - }; - - let repo = flowctl_db::ScoutCacheRepo::new(conn.inner_conn()); - db_shim::block_on_pub(async { - match repo.get(&key).await { - Ok(Some(result)) => { - if json_mode { - let parsed: serde_json::Value = - serde_json::from_str(&result) - .unwrap_or(serde_json::Value::String(result)); - json_output(json!({"hit": true, "key": key, "result": parsed})); - } else { - println!("hit: {}", result); - } - } - Ok(None) => { - if json_mode { - json_output(json!({"hit": false, "key": key})); - } else { - println!("miss"); - } - } - Err(_) => { - if json_mode { - json_output(json!({"hit": false, "key": key})); - } else { - println!("miss (db error)"); - } - } - } - }); + } else if json_mode { + json_output(json!({"hit": false, "key": key})); + } else { + println!("miss"); + } } ScoutCacheCmd::Set { scout_type, @@ -117,15 +104,9 @@ pub fn dispatch(cmd: 
&ScoutCacheCmd, json_mode: bool) { result.to_string() }; - let conn = db_shim::require_db() - .unwrap_or_else(|e| error_exit(&format!("DB unavailable: {e}"))); - - let repo = flowctl_db::ScoutCacheRepo::new(conn.inner_conn()); - db_shim::block_on_pub(async { - repo.set(&key, &c, scout_type, &result_data) - .await - .unwrap_or_else(|e| error_exit(&format!("Failed to cache: {e}"))); - }); + let path = cache_dir().join(key_to_filename(&key)); + std::fs::write(&path, &result_data) + .unwrap_or_else(|e| error_exit(&format!("Failed to cache: {e}"))); if json_mode { json_output(json!({"ok": true, "key": key})); @@ -134,15 +115,15 @@ pub fn dispatch(cmd: &ScoutCacheCmd, json_mode: bool) { } } ScoutCacheCmd::Clear => { - let conn = db_shim::require_db() - .unwrap_or_else(|e| error_exit(&format!("DB unavailable: {e}"))); - - let repo = flowctl_db::ScoutCacheRepo::new(conn.inner_conn()); - let n = db_shim::block_on_pub(async { - repo.clear() - .await - .unwrap_or_else(|e| error_exit(&format!("Failed to clear: {e}"))) - }); + let dir = cache_dir(); + let mut n = 0u64; + if let Ok(entries) = std::fs::read_dir(&dir) { + for entry in entries.flatten() { + if std::fs::remove_file(entry.path()).is_ok() { + n += 1; + } + } + } if json_mode { json_output(json!({"ok": true, "cleared": n})); diff --git a/flowctl/crates/flowctl-cli/src/commands/skill.rs b/flowctl/crates/flowctl-cli/src/commands/skill.rs index 11bee184..418208d1 100644 --- a/flowctl/crates/flowctl-cli/src/commands/skill.rs +++ b/flowctl/crates/flowctl-cli/src/commands/skill.rs @@ -1,11 +1,8 @@ -//! Skill commands: register, match. +//! Skill commands: register. //! //! `skill register` scans `skills/*/SKILL.md` files, extracts YAML -//! frontmatter (name + description), and upserts each into the DB with -//! a BGE-small embedding for semantic matching. -//! -//! `skill match` performs semantic vector search against registered -//! skills and returns ranked results. +//! frontmatter (name + description), and logs them. 
With file-based storage, +skill registration is a scan-and-report operation (no DB upsert needed). use clap::Subcommand; use serde::Deserialize; @@ -13,29 +10,16 @@ use serde_json::json; use crate::output::{error_exit, json_output, pretty_output}; -use super::db_shim; - // ── CLI definition ───────────────────────────────────────────────── #[derive(Subcommand, Debug)] pub enum SkillCmd { - /// Scan skills/*/SKILL.md and register into DB with embeddings. + /// Scan skills/*/SKILL.md and report discovered skills (no DB). Register { /// Directory to scan (default: DROID_PLUGIN_ROOT or CLAUDE_PLUGIN_ROOT). #[arg(long)] dir: Option, }, - /// Semantic search against registered skills. - Match { - /// Search query text. query: String, - /// Maximum results to return. - #[arg(long, default_value = "5")] - limit: usize, - /// Minimum cosine similarity threshold. - #[arg(long, default_value = "0.70")] - threshold: f64, - }, } // ── Frontmatter struct ───────────────────────────────────────────── @@ -51,11 +35,6 @@ struct SkillFrontmatter { pub fn dispatch(cmd: &SkillCmd, json: bool) { match cmd { SkillCmd::Register { dir } => cmd_skill_register(json, dir.as_deref()), - SkillCmd::Match { - query, - limit, - threshold, - } => cmd_skill_match(json, query, *limit, *threshold), } } @@ -120,28 +99,6 @@ fn cmd_skill_register(json: bool, dir: Option<&str>) { )); } - // Upsert each skill into DB.
- let conn = db_shim::require_db().unwrap_or_else(|e| { - error_exit(&format!("Cannot open DB: {e}")); - }); - - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("failed to create tokio runtime"); - - let repo = flowctl_db::skill::SkillRepo::new(conn.inner_conn()); - - for (name, desc, path) in &entries { - rt.block_on(async { - repo.upsert(name, desc, Some(path.as_str())) - .await - .unwrap_or_else(|e| { - eprintln!("warn: failed to upsert skill '{}': {e}", name); - }); - }); - } - let skills_json: Vec = entries .iter() .map(|(n, d, _)| json!({"name": n, "description": d})) @@ -159,54 +116,3 @@ fn cmd_skill_register(json: bool, dir: Option<&str>) { } } } - -// ── Match ────────────────────────────────────────────────────────── - -fn cmd_skill_match(json: bool, query: &str, limit: usize, threshold: f64) { - let conn = db_shim::require_db().unwrap_or_else(|e| { - error_exit(&format!("Cannot open DB: {e}")); - }); - - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("failed to create tokio runtime"); - - let repo = flowctl_db::skill::SkillRepo::new(conn.inner_conn()); - let matches = rt.block_on(async { - repo.match_skills(query, limit, threshold) - .await - .unwrap_or_else(|e| { - error_exit(&format!("match_skills failed: {e}")); - }) - }); - - if json { - let out: Vec = matches - .iter() - .map(|m| { - json!({ - "name": m.name, - "description": m.description, - "score": (m.score * 100.0).round() / 100.0, - }) - }) - .collect(); - json_output(json!(out)); - } else { - if matches.is_empty() { - pretty_output("skill_match", "No matching skills found."); - return; - } - pretty_output( - "skill_match", - &format!(" {:<6} {:<28} {}", "Score", "Name", "Description"), - ); - for m in &matches { - pretty_output( - "skill_match", - &format!(" {:<6.2} {:<28} {}", m.score, m.name, m.description), - ); - } - } -} diff --git a/flowctl/crates/flowctl-cli/src/commands/stats.rs 
b/flowctl/crates/flowctl-cli/src/commands/stats.rs index 2c6fa274..7ad74dd1 100644 --- a/flowctl/crates/flowctl-cli/src/commands/stats.rs +++ b/flowctl/crates/flowctl-cli/src/commands/stats.rs @@ -1,26 +1,15 @@ //! Stats command: flowctl stats [--epic ] [--weekly] [--tokens] [--bottlenecks] [--dora] [--format json] //! //! TTY-aware: table output for terminals, JSON when piped or --json is passed. +//! Stats are computed from JSON file store (epics, tasks, state files). -use std::env; use std::io::IsTerminal; -use std::path::PathBuf; use clap::Subcommand; use serde_json::json; use crate::output::{error_exit, json_output, pretty_output}; - -/// Open DB or exit with error. -fn open_db_or_exit() -> crate::commands::db_shim::Connection { - let cwd = env::current_dir().unwrap_or_else(|_| PathBuf::from(".")); - match crate::commands::db_shim::open(&cwd) { - Ok(conn) => conn, - Err(e) => { - error_exit(&format!("Cannot open database: {}", e)); - } - } -} +use super::helpers::get_flow_dir; /// Determine if output should be JSON: explicit --json flag, or stdout is not a terminal. 
fn should_json(json_flag: bool) -> bool { @@ -68,9 +57,9 @@ pub fn dispatch(cmd: &StatsCmd, json_flag: bool) { match cmd { StatsCmd::Summary => cmd_summary(json_flag), StatsCmd::Epic { id } => cmd_epic(json_flag, id.as_deref()), - StatsCmd::Weekly { weeks } => cmd_weekly(json_flag, *weeks), - StatsCmd::Tokens { epic } => cmd_tokens(json_flag, epic.as_deref()), - StatsCmd::Bottlenecks { limit } => cmd_bottlenecks(json_flag, *limit), + StatsCmd::Weekly { weeks: _ } => cmd_weekly(json_flag), + StatsCmd::Tokens { epic: _ } => cmd_tokens(json_flag), + StatsCmd::Bottlenecks { limit: _ } => cmd_bottlenecks(json_flag), StatsCmd::Dora => cmd_dora(json_flag), StatsCmd::Rollup => cmd_rollup(json_flag), StatsCmd::Cleanup => cmd_cleanup(json_flag), @@ -78,279 +67,173 @@ pub fn dispatch(cmd: &StatsCmd, json_flag: bool) { } fn cmd_summary(json_flag: bool) { - let conn = open_db_or_exit(); - let stats = crate::commands::db_shim::StatsQuery::new(&conn); + let flow_dir = get_flow_dir(); + let epics = flowctl_core::json_store::epic_list(&flow_dir).unwrap_or_default(); + let open_epics = epics.iter().filter(|e| e.status.to_string() == "open").count(); + + let mut total_tasks = 0i64; + let mut done_tasks = 0i64; + let mut in_progress_tasks = 0i64; + let mut blocked_tasks = 0i64; + + for epic in &epics { + let tasks = flowctl_core::json_store::task_list_by_epic(&flow_dir, &epic.id).unwrap_or_default(); + for task in &tasks { + total_tasks += 1; + match task.status { + flowctl_core::state_machine::Status::Done => done_tasks += 1, + flowctl_core::state_machine::Status::InProgress => in_progress_tasks += 1, + flowctl_core::state_machine::Status::Blocked => blocked_tasks += 1, + _ => {} + } + } + } - let summary = match stats.summary() { - Ok(s) => s, - Err(e) => error_exit(&format!("Failed to query stats: {}", e)), - }; + let store = flowctl_db::FlowStore::new(flow_dir); + let total_events = store.events().read_all().map(|v| v.len() as i64).unwrap_or(0); if should_json(json_flag) { 
json_output(json!({ - "total_epics": summary.total_epics, - "open_epics": summary.open_epics, - "total_tasks": summary.total_tasks, - "done_tasks": summary.done_tasks, - "in_progress_tasks": summary.in_progress_tasks, - "blocked_tasks": summary.blocked_tasks, - "total_events": summary.total_events, - "total_tokens": summary.total_tokens, - "total_cost_usd": summary.total_cost_usd, + "total_epics": epics.len(), + "open_epics": open_epics, + "total_tasks": total_tasks, + "done_tasks": done_tasks, + "in_progress_tasks": in_progress_tasks, + "blocked_tasks": blocked_tasks, + "total_events": total_events, + "total_tokens": 0, + "total_cost_usd": 0.0, })); } else { println!("flowctl Stats Summary"); println!("{}", "=".repeat(40)); - println!("Epics: {} total, {} open", summary.total_epics, summary.open_epics); + println!("Epics: {} total, {} open", epics.len(), open_epics); println!( "Tasks: {} total, {} done, {} in progress, {} blocked", - summary.total_tasks, summary.done_tasks, summary.in_progress_tasks, summary.blocked_tasks + total_tasks, done_tasks, in_progress_tasks, blocked_tasks ); - println!("Events: {}", summary.total_events); - println!("Tokens: {}", format_tokens(summary.total_tokens)); - println!("Cost: ${:.4}", summary.total_cost_usd); + println!("Events: {}", total_events); } } -fn cmd_epic(json_flag: bool, epic_id: Option<&str>) { - let conn = open_db_or_exit(); - let stats = crate::commands::db_shim::StatsQuery::new(&conn); - - let epics = match stats.per_epic(epic_id) { - Ok(e) => e, - Err(e) => error_exit(&format!("Failed to query epic stats: {}", e)), +fn cmd_epic(json_flag: bool, epic_filter: Option<&str>) { + let flow_dir = get_flow_dir(); + let epics = flowctl_core::json_store::epic_list(&flow_dir).unwrap_or_default(); + let filtered: Vec<_> = if let Some(eid) = epic_filter { + epics.into_iter().filter(|e| e.id == eid).collect() + } else { + epics }; + let mut data: Vec = Vec::new(); + for epic in &filtered { + let tasks = 
flowctl_core::json_store::task_list_by_epic(&flow_dir, &epic.id).unwrap_or_default(); + let done_count = tasks.iter().filter(|t| t.status == flowctl_core::state_machine::Status::Done).count(); + data.push(json!({ + "epic_id": epic.id, + "title": epic.title, + "status": epic.status.to_string(), + "task_count": tasks.len(), + "done_count": done_count, + "avg_duration_secs": 0, + "total_tokens": 0, + "total_cost": 0.0, + })); + } + if should_json(json_flag) { - let data: Vec = epics.iter().map(|e| json!({ - "epic_id": e.epic_id, - "title": e.title, - "status": e.status, - "task_count": e.task_count, - "done_count": e.done_count, - "avg_duration_secs": e.avg_duration_secs, - "total_tokens": e.total_tokens, - "total_cost": e.total_cost, - })).collect(); json_output(json!({ "epics": data, "count": data.len() })); - } else if epics.is_empty() { + } else if data.is_empty() { println!("No epic stats found."); } else { - println!("{:<30} {:>6} {:>5}/{:>5} {:>10} {:>10}", "EPIC", "STATUS", "DONE", "TOTAL", "TOKENS", "COST"); - println!("{}", "-".repeat(75)); - for e in &epics { + println!("{:<30} {:>6} {:>5}/{:>5}", "EPIC", "STATUS", "DONE", "TOTAL"); + println!("{}", "-".repeat(55)); + for e in &data { println!( - "{:<30} {:>6} {:>5}/{:>5} {:>10} {:>10}", - truncate(&e.epic_id, 30), - e.status, - e.done_count, - e.task_count, - format_tokens(e.total_tokens), - format!("${:.4}", e.total_cost), + "{:<30} {:>6} {:>5}/{:>5}", + truncate(e["epic_id"].as_str().unwrap_or(""), 30), + e["status"].as_str().unwrap_or(""), + e["done_count"], + e["task_count"], ); } } } -fn cmd_weekly(json_flag: bool, weeks: u32) { - let conn = open_db_or_exit(); - let stats = crate::commands::db_shim::StatsQuery::new(&conn); - - let trends = match stats.weekly_trends(weeks) { - Ok(t) => t, - Err(e) => error_exit(&format!("Failed to query weekly trends: {}", e)), - }; - +fn cmd_weekly(json_flag: bool) { if should_json(json_flag) { - let data: Vec = trends.iter().map(|t| json!({ - "week": t.week, - 
"tasks_started": t.tasks_started, - "tasks_completed": t.tasks_completed, - "tasks_failed": t.tasks_failed, - })).collect(); - json_output(json!({ "weekly_trends": data })); - } else if trends.is_empty() { - println!("No weekly trend data available."); + json_output(json!({ "weekly_trends": [], "message": "Weekly trends not available (file-based storage)" })); } else { - println!("{:<12} {:>8} {:>10} {:>8}", "WEEK", "STARTED", "COMPLETED", "FAILED"); - println!("{}", "-".repeat(42)); - for t in &trends { - println!("{:<12} {:>8} {:>10} {:>8}", t.week, t.tasks_started, t.tasks_completed, t.tasks_failed); - } + println!("Weekly trends not available with file-based storage."); } } -fn cmd_tokens(json_flag: bool, epic_id: Option<&str>) { - let conn = open_db_or_exit(); - let stats = crate::commands::db_shim::StatsQuery::new(&conn); - - let tokens = match stats.token_breakdown(epic_id) { - Ok(t) => t, - Err(e) => error_exit(&format!("Failed to query token usage: {}", e)), - }; - +fn cmd_tokens(json_flag: bool) { if should_json(json_flag) { - let data: Vec = tokens.iter().map(|t| json!({ - "epic_id": t.epic_id, - "model": t.model, - "input_tokens": t.input_tokens, - "output_tokens": t.output_tokens, - "cache_read": t.cache_read, - "cache_write": t.cache_write, - "estimated_cost": t.estimated_cost, - })).collect(); - json_output(json!({ "token_usage": data })); - } else if tokens.is_empty() { - println!("No token usage data."); + json_output(json!({ "token_usage": [], "message": "Token tracking not available (file-based storage)" })); } else { - println!("{:<25} {:<20} {:>10} {:>10} {:>10}", "EPIC", "MODEL", "INPUT", "OUTPUT", "COST"); - println!("{}", "-".repeat(80)); - for t in &tokens { - println!( - "{:<25} {:<20} {:>10} {:>10} {:>10}", - truncate(&t.epic_id, 25), - truncate(&t.model, 20), - format_tokens(t.input_tokens), - format_tokens(t.output_tokens), - format!("${:.4}", t.estimated_cost), - ); - } + println!("Token usage tracking not available with file-based 
storage."); } } -fn cmd_bottlenecks(json_flag: bool, limit: usize) { - let conn = open_db_or_exit(); - let stats = crate::commands::db_shim::StatsQuery::new(&conn); - - let bottlenecks = match stats.bottlenecks(limit) { - Ok(b) => b, - Err(e) => error_exit(&format!("Failed to query bottlenecks: {}", e)), - }; - +fn cmd_bottlenecks(json_flag: bool) { if should_json(json_flag) { - let data: Vec = bottlenecks.iter().map(|b| json!({ - "task_id": b.task_id, - "epic_id": b.epic_id, - "title": b.title, - "duration_secs": b.duration_secs, - "status": b.status, - "blocked_reason": b.blocked_reason, - })).collect(); - json_output(json!({ "bottlenecks": data })); - } else if bottlenecks.is_empty() { - println!("No bottleneck data."); + json_output(json!({ "bottlenecks": [], "message": "Bottleneck analysis not available (file-based storage)" })); } else { - println!("{:<25} {:<10} {:>10} TITLE", "TASK", "STATUS", "DURATION"); - println!("{}", "-".repeat(70)); - for b in &bottlenecks { - let duration = b.duration_secs - .map(format_duration) - .unwrap_or_else(|| "-".to_string()); - let suffix = b.blocked_reason.as_ref() - .map(|r| format!(" [blocked: {}]", truncate(r, 30))) - .unwrap_or_default(); - println!( - "{:<25} {:<10} {:>10} {}{}", - truncate(&b.task_id, 25), - b.status, - duration, - truncate(&b.title, 30), - suffix, - ); - } + println!("Bottleneck analysis not available with file-based storage."); } } fn cmd_dora(json_flag: bool) { - let conn = open_db_or_exit(); - let stats = crate::commands::db_shim::StatsQuery::new(&conn); - - let dora = match stats.dora_metrics() { - Ok(d) => d, - Err(e) => error_exit(&format!("Failed to compute DORA metrics: {}", e)), - }; - if should_json(json_flag) { json_output(json!({ - "lead_time_hours": dora.lead_time_hours, - "throughput_per_week": dora.throughput_per_week, - "change_failure_rate": dora.change_failure_rate, - "time_to_restore_hours": dora.time_to_restore_hours, + "lead_time_hours": null, + "throughput_per_week": 0.0, + 
"change_failure_rate": 0.0, + "time_to_restore_hours": null, + "message": "DORA metrics not available (file-based storage)", })); } else { - println!("DORA Metrics (last 30 days)"); - println!("{}", "=".repeat(40)); - println!( - "Lead Time: {}", - dora.lead_time_hours - .map(|h| format!("{:.1}h", h)) - .unwrap_or_else(|| "N/A".to_string()) - ); - println!("Throughput: {:.1} tasks/week", dora.throughput_per_week); - println!("Change Failure Rate: {:.1}%", dora.change_failure_rate * 100.0); - println!( - "Time to Restore: {}", - dora.time_to_restore_hours - .map(|h| format!("{:.1}h", h)) - .unwrap_or_else(|| "N/A".to_string()) - ); + println!("DORA metrics not available with file-based storage."); } } fn cmd_rollup(json_flag: bool) { - let conn = open_db_or_exit(); - let stats = crate::commands::db_shim::StatsQuery::new(&conn); - - match stats.generate_monthly_rollups() { - Ok(count) => { - if should_json(json_flag) { - json_output(json!({ "months_updated": count })); - } else { - println!("Updated {} monthly rollup(s).", count); - } - } - Err(e) => error_exit(&format!("Failed to generate rollups: {}", e)), + if should_json(json_flag) { + json_output(json!({ "months_updated": 0, "message": "Rollups not applicable (file-based storage)" })); + } else { + println!("Rollups not applicable with file-based storage."); } } fn cmd_cleanup(json_flag: bool) { - let conn = open_db_or_exit(); - - match crate::commands::db_shim::cleanup(&conn) { - Ok(count) => { - if should_json(json_flag) { - json_output(json!({ "deleted": count })); - } else { - println!("Cleaned up {} old record(s).", count); - } - } - Err(e) => error_exit(&format!("Cleanup failed: {}", e)), + if should_json(json_flag) { + json_output(json!({ "deleted": 0, "message": "Cleanup not applicable (file-based storage)" })); + } else { + println!("Cleanup not applicable with file-based storage."); } } // ── DAG rendering ──────────────────────────────────────────────────── pub fn cmd_dag(json_flag: bool, epic_id: 
Option) { - let conn = open_db_or_exit(); - let task_repo = crate::commands::db_shim::TaskRepo::new(&conn); + let flow_dir = get_flow_dir(); - // Find epic: use provided ID or find the first open epic let epic_id = match epic_id { Some(id) => id, None => { - let epic_repo = crate::commands::db_shim::EpicRepo::new(&conn); - match epic_repo.list(Some("open")) { - Ok(epics) if !epics.is_empty() => epics[0].id.clone(), - _ => error_exit("No open epic found. Use --epic to specify."), + let epics = flowctl_core::json_store::epic_list(&flow_dir).unwrap_or_default(); + match epics.iter().find(|e| e.status.to_string() == "open") { + Some(e) => e.id.clone(), + None => error_exit("No open epic found. Use --epic to specify."), } } }; - let tasks = match task_repo.list_by_epic(&epic_id) { - Ok(t) if !t.is_empty() => t, - Ok(_) => error_exit(&format!("No tasks found for epic {}", epic_id)), - Err(e) => error_exit(&format!("Failed to load tasks: {}", e)), - }; + let tasks = flowctl_core::json_store::task_list_by_epic(&flow_dir, &epic_id).unwrap_or_default(); + if tasks.is_empty() { + error_exit(&format!("No tasks found for epic {}", epic_id)); + } let dag = match flowctl_core::TaskDag::from_tasks(&tasks) { Ok(d) => { @@ -423,7 +306,6 @@ pub fn cmd_dag(json_flag: bool, epic_id: Option) { flowctl_core::Status::Todo => "todo", _ => " ?? 
", }; - // Short ID: take just the task number suffix let short_id = task.id.rsplit('.').next().unwrap_or(&task.id); let label = format!(".{} [{}]", short_id, status_icon); let indent = " ".repeat(layer); @@ -433,7 +315,6 @@ pub fn cmd_dag(json_flag: bool, epic_id: Option) { writeln!(buf, "{}{}\u{2514}\u{2500}{}\u{2500}\u{2518}", indent, if layer > 0 { " " } else { "" }, "\u{2500}".repeat(label.len())).ok(); } - // Draw arrows between layers if layer < max_layer { let next_layer_nodes: Vec<&flowctl_core::types::Task> = tasks .iter() @@ -452,27 +333,20 @@ pub fn cmd_dag(json_flag: bool, epic_id: Option) { // ── Estimate command ───────────────────────────────────────────────── pub fn cmd_estimate(json_flag: bool, epic_id: &str) { - let conn = open_db_or_exit(); - let task_repo = crate::commands::db_shim::TaskRepo::new(&conn); - let runtime_repo = crate::commands::db_shim::RuntimeRepo::new(&conn); - - let tasks = match task_repo.list_by_epic(epic_id) { - Ok(t) => t, - Err(e) => error_exit(&format!("Failed to load tasks: {}", e)), - }; + let flow_dir = get_flow_dir(); + let tasks = flowctl_core::json_store::task_list_by_epic(&flow_dir, epic_id).unwrap_or_default(); if tasks.is_empty() { error_exit(&format!("No tasks found for epic {}", epic_id)); } - // Collect durations from completed tasks let mut completed_durations: Vec = Vec::new(); let mut incomplete_count = 0u32; for task in &tasks { if task.status == flowctl_core::Status::Done { - if let Ok(Some(rt)) = runtime_repo.get(&task.id) { - if let Some(dur) = rt.duration_secs { + if let Ok(state) = flowctl_core::json_store::state_read(&flow_dir, &task.id) { + if let Some(dur) = state.duration_seconds { completed_durations.push(dur); } } @@ -517,26 +391,6 @@ pub fn cmd_estimate(json_flag: bool, epic_id: &str) { // ── Formatting helpers ──────────────────────────────────────────────── -fn format_tokens(n: i64) -> String { - if n >= 1_000_000 { - format!("{:.1}M", n as f64 / 1_000_000.0) - } else if n >= 1_000 { - 
format!("{:.1}K", n as f64 / 1_000.0) - } else { - n.to_string() - } -} - -fn format_duration(secs: i64) -> String { - if secs >= 3600 { - format!("{:.1}h", secs as f64 / 3600.0) - } else if secs >= 60 { - format!("{}m", secs / 60) - } else { - format!("{}s", secs) - } -} - fn truncate(s: &str, max: usize) -> String { if s.len() <= max { s.to_string() diff --git a/flowctl/crates/flowctl-cli/src/commands/workflow/lifecycle.rs b/flowctl/crates/flowctl-cli/src/commands/workflow/lifecycle.rs index 648c88a7..2d89b7b8 100644 --- a/flowctl/crates/flowctl-cli/src/commands/workflow/lifecycle.rs +++ b/flowctl/crates/flowctl-cli/src/commands/workflow/lifecycle.rs @@ -7,16 +7,15 @@ use serde_json::json; use crate::output::{error_exit, json_output}; use flowctl_core::state_machine::Status; -use flowctl_db::EventStoreRepo; +use flowctl_db::FlowStore; use flowctl_service::lifecycle::{ BlockTaskRequest, DoneTaskRequest, FailTaskRequest, RestartTaskRequest, StartTaskRequest, }; -use super::{block_on, ensure_flow_exists, resolve_actor, try_open_lsql_conn}; +use super::{ensure_flow_exists, resolve_actor}; pub fn cmd_start(json_mode: bool, id: String, force: bool, _note: Option) { let flow_dir = ensure_flow_exists(); - let conn = try_open_lsql_conn(); let actor = resolve_actor(); let req = StartTaskRequest { @@ -25,7 +24,7 @@ pub fn cmd_start(json_mode: bool, id: String, force: bool, _note: Option actor, }; - match block_on(flowctl_service::lifecycle::start_task(conn.as_ref(), &flow_dir, req)) { + match flowctl_service::lifecycle::start_task(&flow_dir, req) { Ok(resp) => { if json_mode { json_output(json!({ @@ -51,7 +50,6 @@ pub fn cmd_done( force: bool, ) { let flow_dir = ensure_flow_exists(); - let conn = try_open_lsql_conn(); let actor = resolve_actor(); let req = DoneTaskRequest { @@ -64,7 +62,7 @@ pub fn cmd_done( actor, }; - match block_on(flowctl_service::lifecycle::done_task(conn.as_ref(), &flow_dir, req)) { + match flowctl_service::lifecycle::done_task(&flow_dir, req) { 
Ok(resp) => { if json_mode { let mut result = json!({ @@ -101,14 +99,13 @@ pub fn cmd_done( pub fn cmd_block(json_mode: bool, id: String, reason: String) { let flow_dir = ensure_flow_exists(); - let conn = try_open_lsql_conn(); let req = BlockTaskRequest { task_id: id.clone(), reason, }; - match block_on(flowctl_service::lifecycle::block_task(conn.as_ref(), &flow_dir, req)) { + match flowctl_service::lifecycle::block_task(&flow_dir, req) { Ok(resp) => { if json_mode { json_output(json!({ @@ -126,7 +123,6 @@ pub fn cmd_block(json_mode: bool, id: String, reason: String) { pub fn cmd_fail(json_mode: bool, id: String, reason: Option, force: bool) { let flow_dir = ensure_flow_exists(); - let conn = try_open_lsql_conn(); let req = FailTaskRequest { task_id: id.clone(), @@ -134,7 +130,7 @@ pub fn cmd_fail(json_mode: bool, id: String, reason: Option, force: bool force, }; - match block_on(flowctl_service::lifecycle::fail_task(conn.as_ref(), &flow_dir, req)) { + match flowctl_service::lifecycle::fail_task(&flow_dir, req) { Ok(resp) => { if json_mode { let mut result = json!({ @@ -171,7 +167,6 @@ pub fn cmd_fail(json_mode: bool, id: String, reason: Option, force: bool pub fn cmd_restart(json_mode: bool, id: String, dry_run: bool, force: bool) { let flow_dir = ensure_flow_exists(); - let conn = try_open_lsql_conn(); let req = RestartTaskRequest { task_id: id.clone(), @@ -179,7 +174,7 @@ pub fn cmd_restart(json_mode: bool, id: String, dry_run: bool, force: bool) { force, }; - match block_on(flowctl_service::lifecycle::restart_task(conn.as_ref(), &flow_dir, req)) { + match flowctl_service::lifecycle::restart_task(&flow_dir, req) { Ok(resp) => { if dry_run { if json_mode { @@ -194,8 +189,6 @@ pub fn cmd_restart(json_mode: bool, id: String, dry_run: bool, force: bool) { "Dry run \u{2014} would restart {} task(s):", resp.reset_ids.len() ); - // In dry-run mode we don't have per-task status info in the response, - // so just list the IDs for tid in &resp.reset_ids { let marker = if 
resp.in_progress_overridden.contains(tid) { " (force)" @@ -237,45 +230,41 @@ pub fn cmd_restart(json_mode: bool, id: String, dry_run: bool, force: bool) { } pub fn cmd_events(json_mode: bool, epic_id: String) { - let _flow_dir = ensure_flow_exists(); - let conn = try_open_lsql_conn(); - - let conn = match conn { - Some(c) => c, - None => { - error_exit("Cannot open database for event store query"); - } - }; + let flow_dir = ensure_flow_exists(); - let repo = EventStoreRepo::new(conn); + let store = FlowStore::new(flow_dir.to_path_buf()); + let events_store = store.events(); - // Query both the epic stream and all task streams for this epic - let prefixes = vec![ - format!("epic:{epic_id}"), - format!("task:{epic_id}."), - ]; + // Read all events and filter by epic prefix + match events_store.read_all() { + Ok(lines) => { + // Parse events and filter by epic + let mut matching: Vec = Vec::new(); + for line in &lines { + if let Ok(val) = serde_json::from_str::(line) { + let stream = val.get("stream_id").and_then(|s| s.as_str()).unwrap_or(""); + let eid = val.get("epic_id").and_then(|s| s.as_str()).unwrap_or(""); + if stream.contains(&epic_id) || eid == epic_id { + matching.push(val); + } + } + } - match block_on(repo.query_by_stream_prefixes(&prefixes)) { - Ok(events) => { if json_mode { - let items: Vec = events - .iter() - .map(|e| serde_json::to_value(e).unwrap_or_default()) - .collect(); json_output(json!({ "epic": epic_id, - "count": events.len(), - "events": items, + "count": matching.len(), + "events": matching, })); - } else if events.is_empty() { + } else if matching.is_empty() { println!("No events found for epic {epic_id}"); } else { - println!("Events for epic {} ({} total):\n", epic_id, events.len()); - for e in &events { - println!( - " [{}] {} v{} — {} ({})", - e.event_id, e.stream_id, e.version, e.event_type, e.created_at, - ); + println!("Events for epic {} ({} total):\n", epic_id, matching.len()); + for e in &matching { + let stream = 
e.get("stream_id").and_then(|s| s.as_str()).unwrap_or("?"); + let event_type = e.get("type").and_then(|s| s.as_str()).unwrap_or("?"); + let ts = e.get("timestamp").and_then(|s| s.as_str()).unwrap_or("?"); + println!(" [{}] {} — {}", stream, event_type, ts); } } } diff --git a/flowctl/crates/flowctl-cli/src/commands/workflow/mod.rs b/flowctl/crates/flowctl-cli/src/commands/workflow/mod.rs index e81b2e3a..92632f84 100644 --- a/flowctl/crates/flowctl-cli/src/commands/workflow/mod.rs +++ b/flowctl/crates/flowctl-cli/src/commands/workflow/mod.rs @@ -28,15 +28,9 @@ use super::helpers::{ensure_flow_symlink, get_flow_dir, resolve_actor}; // ── Helpers ───────────────────────────────────────────────────────── /// Ensure .flow/ exists, auto-creating the symlink if needed. -/// -/// In worktree environments (e.g., Claude Code `isolation: "worktree"`), -/// `.flow/` may not exist because the worktree was created outside the -/// flow-code worktree kit. Auto-create the symlink so workers are -/// self-healing. pub(crate) fn ensure_flow_exists() -> PathBuf { let flow_dir = get_flow_dir(); if !flow_dir.exists() { - // Try to create the .flow/ symlink (idempotent). let cwd = env::current_dir().unwrap_or_else(|_| PathBuf::from(".")); if let Err(e) = ensure_flow_symlink(&cwd) { error_exit(&format!(".flow/ does not exist and auto-create failed: {e}. Run 'flowctl init' first.")); @@ -48,35 +42,6 @@ pub(crate) fn ensure_flow_exists() -> PathBuf { flow_dir } -/// Bridge: DB connection for functions not yet migrated (phase progress, runtime). -pub(crate) fn require_db() -> crate::commands::db_shim::Connection { - crate::commands::db_shim::require_db() - .unwrap_or_else(|e| error_exit(&format!("Cannot open database: {e}"))) -} - -/// Try to open a libSQL async DB connection (for service-layer calls). 
-pub(crate) fn try_open_lsql_conn() -> Option { - let cwd = env::current_dir().ok()?; - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .ok()?; - rt.block_on(async { - let db = flowctl_db::open_async(&cwd).await.ok()?; - db.connect().ok() - }) -} - -/// Block the current thread on a future (for invoking async service calls -/// from sync CLI code). -pub(crate) fn block_on(fut: F) -> F::Output { - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("failed to create tokio runtime"); - rt.block_on(fut) -} - /// Load all tasks for an epic from JSON files. pub(crate) fn load_tasks_for_epic(flow_dir: &Path, epic_id: &str) -> HashMap { let tasks = flowctl_core::json_store::task_list_by_epic(flow_dir, epic_id).unwrap_or_default(); diff --git a/flowctl/crates/flowctl-cli/src/commands/workflow/phase.rs b/flowctl/crates/flowctl-cli/src/commands/workflow/phase.rs index 31ecb44e..cb6e42a7 100644 --- a/flowctl/crates/flowctl-cli/src/commands/workflow/phase.rs +++ b/flowctl/crates/flowctl-cli/src/commands/workflow/phase.rs @@ -11,7 +11,7 @@ use flowctl_core::config::read_config_bool; use flowctl_core::id::is_task_id; use flowctl_core::types::TaskSize; -use super::{ensure_flow_exists, require_db}; +use super::ensure_flow_exists; /// Worker-phase subcommands. #[derive(Subcommand, Debug)] @@ -165,22 +165,20 @@ fn migrate_phase_id(id: &str) -> String { } } -/// Load completed phases from SQLite, migrating legacy IDs. -fn load_completed_phases(task_id: &str) -> Vec { - let conn = require_db(); - let repo = crate::commands::db_shim::PhaseProgressRepo::new(&conn); - repo.get_completed(task_id) +/// Load completed phases from file store, migrating legacy IDs. 
+fn load_completed_phases(flow_dir: &std::path::Path, task_id: &str) -> Vec { + let store = flowctl_db::FlowStore::new(flow_dir.to_path_buf()); + store.phases().get_completed(task_id) .unwrap_or_default() .into_iter() .map(|id| migrate_phase_id(&id)) .collect() } -/// Mark a phase as done in SQLite. -fn save_phase_done(task_id: &str, phase: &str) { - let conn = require_db(); - let repo = crate::commands::db_shim::PhaseProgressRepo::new(&conn); - if let Err(e) = repo.mark_done(task_id, phase) { +/// Mark a phase as done in file store. +fn save_phase_done(flow_dir: &std::path::Path, task_id: &str, phase: &str) { + let store = flowctl_db::FlowStore::new(flow_dir.to_path_buf()); + if let Err(e) = store.phases().mark_done(task_id, phase) { eprintln!("Warning: failed to save phase progress: {}", e); } } @@ -206,7 +204,7 @@ pub fn dispatch_worker_phase(cmd: &WorkerPhaseCmd, json_mode: bool) { } fn cmd_worker_phase_next(json_mode: bool, task_id: &str, tdd: bool, review: Option<&str>, size: TaskSize) { - let _flow_dir = ensure_flow_exists(); + let flow_dir = ensure_flow_exists(); if !is_task_id(task_id) { error_exit(&format!( @@ -216,7 +214,7 @@ fn cmd_worker_phase_next(json_mode: bool, task_id: &str, tdd: bool, review: Opti } let seq = build_phase_sequence(tdd, review.is_some(), size); - let completed = load_completed_phases(task_id); + let completed = load_completed_phases(&flow_dir, task_id); let completed_set: HashSet<&str> = completed.iter().map(std::string::String::as_str).collect(); @@ -277,7 +275,7 @@ fn cmd_worker_phase_done( review: Option<&str>, size: TaskSize, ) { - let _flow_dir = ensure_flow_exists(); + let flow_dir = ensure_flow_exists(); if !is_task_id(task_id) { error_exit(&format!( @@ -298,7 +296,7 @@ fn cmd_worker_phase_done( )); } - let completed = load_completed_phases(task_id); + let completed = load_completed_phases(&flow_dir, task_id); let completed_set: HashSet<&str> = completed.iter().map(std::string::String::as_str).collect(); @@ -319,10 +317,10 
@@ fn cmd_worker_phase_done( } // Mark phase done - save_phase_done(task_id, phase); + save_phase_done(&flow_dir, task_id, phase); // Reload to get updated state - let updated_completed = load_completed_phases(task_id); + let updated_completed = load_completed_phases(&flow_dir, task_id); let updated_set: HashSet<&str> = updated_completed.iter().map(std::string::String::as_str).collect(); let next_phase = seq.iter().find(|p| !updated_set.contains(**p)).copied(); diff --git a/flowctl/crates/flowctl-cli/src/commands/workflow/pipeline_phase.rs b/flowctl/crates/flowctl-cli/src/commands/workflow/pipeline_phase.rs index 3da1aadf..8f69293d 100644 --- a/flowctl/crates/flowctl-cli/src/commands/workflow/pipeline_phase.rs +++ b/flowctl/crates/flowctl-cli/src/commands/workflow/pipeline_phase.rs @@ -1,16 +1,17 @@ //! Pipeline phase commands: `flowctl phase next` and `flowctl phase done`. //! -//! These commands manage the epic-level pipeline progression stored in the -//! `pipeline_progress` table. Distinct from worker-phase (task-level phases). +//! These commands manage the epic-level pipeline progression stored in +//! `.state/pipeline.json`. Distinct from worker-phase (task-level phases). use clap::Subcommand; use serde_json::json; use flowctl_core::pipeline::PipelinePhase; +use flowctl_db::FlowStore; use crate::output::{error_exit, json_output}; -use super::require_db; +use super::ensure_flow_exists; /// Pipeline phase subcommands. #[derive(Subcommand, Debug)] @@ -40,70 +41,31 @@ pub fn dispatch_pipeline_phase(cmd: &PipelinePhaseCmd, json: bool) { } } -/// Read current pipeline phase from DB. If no row exists, initialize to Plan. 
-fn get_or_init_phase(epic_id: &str) -> PipelinePhase { - let conn = require_db(); - let raw = conn.inner_conn(); - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("tokio runtime"); - - rt.block_on(async { - let mut rows = raw - .query( - "SELECT phase FROM pipeline_progress WHERE epic_id = ?1", - libsql::params![epic_id], - ) - .await - .unwrap_or_else(|e| { - error_exit(&format!("DB query failed: {e}")); - }); - - if let Some(row) = rows.next().await.unwrap_or(None) { - let phase_str: String = row.get(0).unwrap_or_else(|_| "plan".to_string()); - PipelinePhase::parse(&phase_str).unwrap_or(PipelinePhase::Plan) - } else { - // No row — initialize with Plan phase. - let now = chrono::Utc::now().to_rfc3339(); - raw.execute( - "INSERT INTO pipeline_progress (epic_id, phase, started_at, updated_at) VALUES (?1, ?2, ?3, ?4)", - libsql::params![epic_id, "plan", now.clone(), now], - ) - .await - .unwrap_or_else(|e| { - error_exit(&format!("DB insert failed: {e}")); - }); +/// Read current pipeline phase from file. If no entry exists, initialize to Plan. +fn get_or_init_phase(flow_dir: &std::path::Path, epic_id: &str) -> PipelinePhase { + let store = FlowStore::new(flow_dir.to_path_buf()); + match store.pipeline().read(epic_id) { + Ok(Some(phase_str)) => PipelinePhase::parse(&phase_str).unwrap_or(PipelinePhase::Plan), + _ => { + // No entry — initialize with Plan phase. + let _ = store.pipeline().write(epic_id, "plan"); PipelinePhase::Plan } - }) + } } -/// Update pipeline phase in DB. 
-fn update_phase(epic_id: &str, new_phase: &PipelinePhase) { - let conn = require_db(); - let raw = conn.inner_conn(); - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("tokio runtime"); - - rt.block_on(async { - let now = chrono::Utc::now().to_rfc3339(); - raw.execute( - "UPDATE pipeline_progress SET phase = ?1, updated_at = ?2 WHERE epic_id = ?3", - libsql::params![new_phase.as_str(), now, epic_id], - ) - .await - .unwrap_or_else(|e| { - error_exit(&format!("DB update failed: {e}")); - }); - }); +/// Update pipeline phase in file store. +fn update_phase(flow_dir: &std::path::Path, epic_id: &str, new_phase: &PipelinePhase) { + let store = FlowStore::new(flow_dir.to_path_buf()); + if let Err(e) = store.pipeline().write(epic_id, new_phase.as_str()) { + error_exit(&format!("Failed to update pipeline phase: {e}")); + } } /// `flowctl phase next --epic --json` fn cmd_phase_next(json: bool, epic_id: &str) { - let current = get_or_init_phase(epic_id); + let flow_dir = ensure_flow_exists(); + let current = get_or_init_phase(&flow_dir, epic_id); let all_done = current.is_terminal(); if json { @@ -123,6 +85,8 @@ fn cmd_phase_next(json: bool, epic_id: &str) { /// `flowctl phase done --epic --phase --json` fn cmd_phase_done(json: bool, epic_id: &str, phase_name: &str) { + let flow_dir = ensure_flow_exists(); + let requested = match PipelinePhase::parse(phase_name) { Some(p) => p, None => { @@ -135,7 +99,7 @@ fn cmd_phase_done(json: bool, epic_id: &str, phase_name: &str) { } }; - let current = get_or_init_phase(epic_id); + let current = get_or_init_phase(&flow_dir, epic_id); if requested != current { error_exit(&format!( @@ -150,7 +114,7 @@ fn cmd_phase_done(json: bool, epic_id: &str, phase_name: &str) { } let next_phase = current.next().expect("non-terminal phase has a next"); - update_phase(epic_id, &next_phase); + update_phase(&flow_dir, epic_id, &next_phase); if json { json_output(json!({ diff --git 
a/flowctl/crates/flowctl-cli/tests/export_import_test.rs b/flowctl/crates/flowctl-cli/tests/export_import_test.rs index abb762ad..bd9ac4b0 100644 --- a/flowctl/crates/flowctl-cli/tests/export_import_test.rs +++ b/flowctl/crates/flowctl-cli/tests/export_import_test.rs @@ -1,10 +1,9 @@ -//! Integration tests for export/import round-trip (async libSQL). +//! Integration tests for export/import round-trip (file-based). //! -//! Tests the DB → Markdown → DB path by: -//! 1. Creating an in-memory DB with test data -//! 2. Writing Markdown files using frontmatter::write -//! 3. Re-importing via flowctl_db::reindex -//! 4. Verifying data matches +//! Tests the JSON → Markdown → JSON path by: +//! 1. Writing epic/task JSON + Markdown files +//! 2. Reading them back via json_store +//! 3. Verifying data matches use std::fs; @@ -54,83 +53,55 @@ fn make_test_task(id: &str, epic: &str, title: &str) -> Task { } } -#[tokio::test] -async fn export_import_round_trip() { +#[test] +fn export_import_round_trip() { let tmp = tempfile::TempDir::new().unwrap(); - let flow_dir = tmp.path().join(".flow"); - fs::create_dir_all(&flow_dir).unwrap(); + let flow_dir = tmp.path().to_path_buf(); - // Step 1: Create DB with test data. - let (_db, conn) = flowctl_db::open_memory_async().await.unwrap(); - let epic_repo = flowctl_db::EpicRepo::new(conn.clone()); - let task_repo = flowctl_db::TaskRepo::new(conn.clone()); + // Step 1: Write epic and task to JSON store. 
+ flowctl_core::json_store::ensure_dirs(&flow_dir).unwrap(); let epic = make_test_epic("fn-50-roundtrip", "Round Trip Test"); let epic_body = "## Description\nThis is the epic body content."; - epic_repo.upsert_with_body(&epic, epic_body).await.unwrap(); + flowctl_core::json_store::epic_write(&flow_dir, &epic).unwrap(); let task = make_test_task("fn-50-roundtrip.1", "fn-50-roundtrip", "First Task"); let task_body = "## Implementation\nDo the thing."; - task_repo.upsert_with_body(&task, task_body).await.unwrap(); + flowctl_core::json_store::task_write_definition(&flow_dir, &task).unwrap(); // Step 2: Export to Markdown files. let epics_dir = flow_dir.join(EPICS_DIR); let tasks_dir = flow_dir.join(TASKS_DIR); - fs::create_dir_all(&epics_dir).unwrap(); - fs::create_dir_all(&tasks_dir).unwrap(); - let (exported_epic, body) = epic_repo.get_with_body("fn-50-roundtrip").await.unwrap(); let doc = frontmatter::Document { - frontmatter: exported_epic, - body: body.clone(), + frontmatter: epic.clone(), + body: epic_body.to_string(), }; let content = frontmatter::write(&doc).unwrap(); fs::write(epics_dir.join("fn-50-roundtrip.md"), &content).unwrap(); - let (exported_task, tbody) = task_repo - .get_with_body("fn-50-roundtrip.1") - .await - .unwrap(); let tdoc = frontmatter::Document { - frontmatter: exported_task, - body: tbody.clone(), + frontmatter: task.clone(), + body: task_body.to_string(), }; let tcontent = frontmatter::write(&tdoc).unwrap(); fs::write(tasks_dir.join("fn-50-roundtrip.1.md"), &tcontent).unwrap(); - // Step 3: Import into a fresh DB. - let (_db2, conn2) = flowctl_db::open_memory_async().await.unwrap(); - let result = flowctl_db::reindex(&conn2, &flow_dir, None) - .await - .unwrap(); - - assert_eq!(result.epics_indexed, 1); - assert_eq!(result.tasks_indexed, 1); - - // Step 4: Verify data matches. 
- let repo2 = flowctl_db::EpicRepo::new(conn2.clone()); - let (reimported_epic, reimported_body) = repo2.get_with_body("fn-50-roundtrip").await.unwrap(); + // Step 3: Verify data can be read back from JSON store. + let reimported_epic = flowctl_core::json_store::epic_read(&flow_dir, "fn-50-roundtrip").unwrap(); assert_eq!(reimported_epic.title, "Round Trip Test"); - assert_eq!(reimported_body.trim(), epic_body.trim()); - let trepo2 = flowctl_db::TaskRepo::new(conn2); - let (reimported_task, reimported_tbody) = - trepo2.get_with_body("fn-50-roundtrip.1").await.unwrap(); - assert_eq!(reimported_task.title, "First Task"); - assert_eq!(reimported_tbody.trim(), task_body.trim()); + let reimported_tasks = flowctl_core::json_store::task_list_by_epic(&flow_dir, "fn-50-roundtrip").unwrap(); + assert_eq!(reimported_tasks.len(), 1); + assert_eq!(reimported_tasks[0].title, "First Task"); } -#[tokio::test] -async fn export_empty_db_produces_no_files() { +#[test] +fn empty_flow_dir_produces_no_data() { let tmp = tempfile::TempDir::new().unwrap(); - let flow_dir = tmp.path().join(".flow"); - let epics_dir = flow_dir.join(EPICS_DIR); - let tasks_dir = flow_dir.join(TASKS_DIR); - fs::create_dir_all(&epics_dir).unwrap(); - fs::create_dir_all(&tasks_dir).unwrap(); + let flow_dir = tmp.path().to_path_buf(); + flowctl_core::json_store::ensure_dirs(&flow_dir).unwrap(); - let (_db, conn) = flowctl_db::open_memory_async().await.unwrap(); - let epic_repo = flowctl_db::EpicRepo::new(conn); - let epics = epic_repo.list(None).await.unwrap(); + let epics = flowctl_core::json_store::epic_list(&flow_dir).unwrap(); assert!(epics.is_empty()); } diff --git a/flowctl/crates/flowctl-cli/tests/integration_test.rs b/flowctl/crates/flowctl-cli/tests/integration_test.rs index 8d489cde..d49779e9 100644 --- a/flowctl/crates/flowctl-cli/tests/integration_test.rs +++ b/flowctl/crates/flowctl-cli/tests/integration_test.rs @@ -366,7 +366,7 @@ fn setup_task(prefix: &str) -> (tempfile::TempDir, String) { (dir, 
task_id) } -/// Read task status from the DB directly via async libSQL. +/// Read task status from the JSON store. #[allow(dead_code)] fn json_task_status(work_dir: &Path, task_id: &str) -> String { let flow_dir = work_dir.join(".flow"); diff --git a/flowctl/crates/flowctl-core/src/changes.rs b/flowctl/crates/flowctl-core/src/changes.rs index 19c73065..93d30aac 100644 --- a/flowctl/crates/flowctl-core/src/changes.rs +++ b/flowctl/crates/flowctl-core/src/changes.rs @@ -115,7 +115,7 @@ impl Mutation { /// A batch of declarative mutation intents. /// /// Build up mutations, then hand the `Changes` to an applier which executes -/// them against JSON files and the libSQL database. +/// them against JSON files in the `.flow/` directory. #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct Changes { /// Ordered list of mutations to apply. diff --git a/flowctl/crates/flowctl-core/src/json_store.rs b/flowctl/crates/flowctl-core/src/json_store.rs index 4dddd6e6..a9d9baf1 100644 --- a/flowctl/crates/flowctl-core/src/json_store.rs +++ b/flowctl/crates/flowctl-core/src/json_store.rs @@ -1,4 +1,4 @@ -//! JSON file store for epics and tasks. +//! JSON file store for epics, tasks, and runtime state. //! //! Provides file-based I/O following the `.flow/` directory layout: //! - `epics/.json` — epic definitions @@ -6,15 +6,22 @@ //! - `tasks/.json` — task definitions (no runtime fields) //! - `tasks/.md` — task spec markdown //! - `.state/tasks/.state.json` — runtime state (status, assignee, evidence) - -use std::fs; +//! - `.state/events.jsonl` — append-only event log +//! - `.state/pipeline.json` — epic pipeline progress +//! - `.state/phases.json` — task phase progress +//! - `.state/locks.json` — file locks +//! - `.state/approvals.json` — approval records +//! 
- `memory/entries.jsonl` — append-only memory entries + +use std::fs::{self, OpenOptions}; +use std::io::Write as _; use std::path::{Path, PathBuf}; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use crate::state_machine::Status; -use crate::types::{Epic, Evidence, Task, EPICS_DIR, SPECS_DIR, STATE_DIR, TASKS_DIR}; +use crate::types::{Epic, Evidence, Task, EPICS_DIR, MEMORY_DIR, SPECS_DIR, STATE_DIR, TASKS_DIR}; // ── Error ─────────────────────────────────────────────────────────── @@ -130,6 +137,8 @@ pub fn ensure_dirs(flow_dir: &Path) -> Result<()> { ensure_dir(&specs_dir(flow_dir))?; ensure_dir(&tasks_dir(flow_dir))?; ensure_dir(&state_tasks_dir(flow_dir))?; + ensure_dir(&flow_dir.join(STATE_DIR))?; + ensure_dir(&flow_dir.join(MEMORY_DIR))?; Ok(()) } @@ -439,6 +448,276 @@ pub fn gaps_write(flow_dir: &Path, epic_id: &str, gaps: &[GapEntry]) -> Result<( Ok(()) } +// ── Atomic write helper ──────────────────────────────────────────── + +/// Write content atomically: write to `.tmp`, then rename over target. +fn atomic_write(path: &Path, content: &[u8]) -> Result<()> { + let tmp = path.with_extension("tmp"); + fs::write(&tmp, content)?; + fs::rename(&tmp, path)?; + Ok(()) +} + +// ── Events (.flow/.state/events.jsonl) ───────────────────────────── + +fn events_path(flow_dir: &Path) -> PathBuf { + flow_dir.join(STATE_DIR).join("events.jsonl") +} + +/// Append a JSON event line to the events log. +pub fn events_append(flow_dir: &Path, event_json: &str) -> Result<()> { + ensure_dir(&flow_dir.join(STATE_DIR))?; + let path = events_path(flow_dir); + let mut f = OpenOptions::new() + .create(true) + .append(true) + .open(&path)?; + writeln!(f, "{}", event_json.trim_end()).map_err(StoreError::Io)?; + Ok(()) +} + +/// Read all event lines from the events log. 
+pub fn events_read_all(flow_dir: &Path) -> Result> { + let path = events_path(flow_dir); + if !path.exists() { + return Ok(vec![]); + } + let content = fs::read_to_string(&path)?; + Ok(content.lines().filter(|l| !l.is_empty()).map(String::from).collect()) +} + +/// Read events filtered by stream_id (substring match on each line). +pub fn events_read_by_stream(flow_dir: &Path, stream_id: &str) -> Result> { + let needle = format!("\"stream_id\":\"{}\"", stream_id); + let all = events_read_all(flow_dir)?; + Ok(all.into_iter().filter(|line| line.contains(&needle)).collect()) +} + +// ── Pipeline progress (.flow/.state/pipeline.json) ───────────────── + +fn pipeline_path(flow_dir: &Path) -> PathBuf { + flow_dir.join(STATE_DIR).join("pipeline.json") +} + +/// Read the current pipeline phase for an epic. +pub fn pipeline_read(flow_dir: &Path, epic_id: &str) -> Result> { + let path = pipeline_path(flow_dir); + if !path.exists() { + return Ok(None); + } + let content = fs::read_to_string(&path)?; + let map: serde_json::Map = serde_json::from_str(&content)?; + Ok(map.get(epic_id).and_then(|v| v.as_str()).map(String::from)) +} + +/// Set the pipeline phase for an epic (read-modify-write with atomic rename). +pub fn pipeline_write(flow_dir: &Path, epic_id: &str, phase: &str) -> Result<()> { + ensure_dir(&flow_dir.join(STATE_DIR))?; + let path = pipeline_path(flow_dir); + let mut map: serde_json::Map = if path.exists() { + let content = fs::read_to_string(&path)?; + serde_json::from_str(&content)? + } else { + serde_json::Map::new() + }; + map.insert(epic_id.to_string(), serde_json::Value::String(phase.to_string())); + let content = serde_json::to_string_pretty(&map)?; + atomic_write(&path, content.as_bytes())?; + Ok(()) +} + +// ── Phase progress (.flow/.state/phases.json) ────────────────────── + +fn phases_path(flow_dir: &Path) -> PathBuf { + flow_dir.join(STATE_DIR).join("phases.json") +} + +/// Get completed phases for a task. 
+pub fn phases_completed(flow_dir: &Path, task_id: &str) -> Result> { + let path = phases_path(flow_dir); + if !path.exists() { + return Ok(vec![]); + } + let content = fs::read_to_string(&path)?; + let map: serde_json::Map = serde_json::from_str(&content)?; + match map.get(task_id) { + Some(serde_json::Value::Array(arr)) => { + Ok(arr.iter().filter_map(|v| v.as_str().map(String::from)).collect()) + } + _ => Ok(vec![]), + } +} + +/// Mark a phase as done for a task. +pub fn phase_mark_done(flow_dir: &Path, task_id: &str, phase: &str) -> Result<()> { + ensure_dir(&flow_dir.join(STATE_DIR))?; + let path = phases_path(flow_dir); + let mut map: serde_json::Map = if path.exists() { + let content = fs::read_to_string(&path)?; + serde_json::from_str(&content)? + } else { + serde_json::Map::new() + }; + let phases = map + .entry(task_id.to_string()) + .or_insert_with(|| serde_json::Value::Array(vec![])); + if let serde_json::Value::Array(arr) = phases { + let phase_val = serde_json::Value::String(phase.to_string()); + if !arr.contains(&phase_val) { + arr.push(phase_val); + } + } + let content = serde_json::to_string_pretty(&map)?; + atomic_write(&path, content.as_bytes())?; + Ok(()) +} + +/// Reset all phase progress for a task. +pub fn phases_reset(flow_dir: &Path, task_id: &str) -> Result<()> { + let path = phases_path(flow_dir); + if !path.exists() { + return Ok(()); + } + let content = fs::read_to_string(&path)?; + let mut map: serde_json::Map = serde_json::from_str(&content)?; + map.remove(task_id); + let content = serde_json::to_string_pretty(&map)?; + atomic_write(&path, content.as_bytes())?; + Ok(()) +} + +// ── File locks (.flow/.state/locks.json) ─────────────────────────── + +fn locks_path(flow_dir: &Path) -> PathBuf { + flow_dir.join(STATE_DIR).join("locks.json") +} + +/// A file lock entry. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LockEntry { + pub file_path: String, + pub task_id: String, + pub mode: String, + pub locked_at: String, +} + +/// Read all current locks. +pub fn locks_read(flow_dir: &Path) -> Result> { + let path = locks_path(flow_dir); + if !path.exists() { + return Ok(vec![]); + } + let content = fs::read_to_string(&path)?; + let locks: Vec = serde_json::from_str(&content)?; + Ok(locks) +} + +/// Acquire a lock on a file for a task. +pub fn lock_acquire(flow_dir: &Path, file_path: &str, task_id: &str, mode: &str) -> Result<()> { + ensure_dir(&flow_dir.join(STATE_DIR))?; + let path = locks_path(flow_dir); + let mut locks = locks_read(flow_dir)?; + // Remove existing lock by same task on same file (idempotent) + locks.retain(|l| !(l.file_path == file_path && l.task_id == task_id)); + locks.push(LockEntry { + file_path: file_path.to_string(), + task_id: task_id.to_string(), + mode: mode.to_string(), + locked_at: Utc::now().to_rfc3339(), + }); + let content = serde_json::to_string_pretty(&locks)?; + atomic_write(&path, content.as_bytes())?; + Ok(()) +} + +/// Release all locks held by a task. Returns number released. +pub fn lock_release_task(flow_dir: &Path, task_id: &str) -> Result { + let path = locks_path(flow_dir); + if !path.exists() { + return Ok(0); + } + let mut locks = locks_read(flow_dir)?; + let before = locks.len(); + locks.retain(|l| l.task_id != task_id); + let removed = (before - locks.len()) as u32; + let content = serde_json::to_string_pretty(&locks)?; + atomic_write(&path, content.as_bytes())?; + Ok(removed) +} + +/// Clear all locks. Returns number cleared. 
+pub fn locks_clear(flow_dir: &Path) -> Result { + let path = locks_path(flow_dir); + if !path.exists() { + return Ok(0); + } + let locks = locks_read(flow_dir)?; + let count = locks.len() as u32; + atomic_write(&path, b"[]")?; + Ok(count) +} + +// ── Memory (.flow/memory/entries.jsonl) ──────────────────────────── + +fn memory_entries_path(flow_dir: &Path) -> PathBuf { + flow_dir.join(MEMORY_DIR).join("entries.jsonl") +} + +/// Append a JSON memory entry. +pub fn memory_append(flow_dir: &Path, entry_json: &str) -> Result<()> { + ensure_dir(&flow_dir.join(MEMORY_DIR))?; + let path = memory_entries_path(flow_dir); + let mut f = OpenOptions::new() + .create(true) + .append(true) + .open(&path)?; + writeln!(f, "{}", entry_json.trim_end()).map_err(StoreError::Io)?; + Ok(()) +} + +/// Read all memory entries. +pub fn memory_read_all(flow_dir: &Path) -> Result> { + let path = memory_entries_path(flow_dir); + if !path.exists() { + return Ok(vec![]); + } + let content = fs::read_to_string(&path)?; + Ok(content.lines().filter(|l| !l.is_empty()).map(String::from).collect()) +} + +/// Search memory entries by case-insensitive substring match. +pub fn memory_search_text(flow_dir: &Path, query: &str) -> Result> { + let query_lower = query.to_lowercase(); + let all = memory_read_all(flow_dir)?; + Ok(all.into_iter().filter(|line| line.to_lowercase().contains(&query_lower)).collect()) +} + +// ── Approvals (.flow/.state/approvals.json) ──────────────────────── + +fn approvals_path(flow_dir: &Path) -> PathBuf { + flow_dir.join(STATE_DIR).join("approvals.json") +} + +/// Read all approvals. +pub fn approvals_read(flow_dir: &Path) -> Result> { + let path = approvals_path(flow_dir); + if !path.exists() { + return Ok(vec![]); + } + let content = fs::read_to_string(&path)?; + let approvals: Vec = serde_json::from_str(&content)?; + Ok(approvals) +} + +/// Write approvals (atomic). 
+pub fn approvals_write(flow_dir: &Path, approvals: &[serde_json::Value]) -> Result<()> { + ensure_dir(&flow_dir.join(STATE_DIR))?; + let path = approvals_path(flow_dir); + let content = serde_json::to_string_pretty(approvals)?; + atomic_write(&path, content.as_bytes())?; + Ok(()) +} + // ── Tests ─────────────────────────────────────────────────────────── #[cfg(test)] @@ -688,5 +967,182 @@ mod tests { assert!(specs_dir(flow_dir).exists()); assert!(tasks_dir(flow_dir).exists()); assert!(state_tasks_dir(flow_dir).exists()); + assert!(flow_dir.join(STATE_DIR).exists()); + assert!(flow_dir.join(MEMORY_DIR).exists()); + } + + // ── Events tests ─────────────────────────────────────────────── + + #[test] + fn test_events_append_and_read() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + events_append(flow_dir, r#"{"stream_id":"s1","type":"created"}"#).unwrap(); + events_append(flow_dir, r#"{"stream_id":"s2","type":"updated"}"#).unwrap(); + events_append(flow_dir, r#"{"stream_id":"s1","type":"done"}"#).unwrap(); + + let all = events_read_all(flow_dir).unwrap(); + assert_eq!(all.len(), 3); + + let s1 = events_read_by_stream(flow_dir, "s1").unwrap(); + assert_eq!(s1.len(), 2); + assert!(s1[0].contains("created")); + assert!(s1[1].contains("done")); + + let s2 = events_read_by_stream(flow_dir, "s2").unwrap(); + assert_eq!(s2.len(), 1); + } + + #[test] + fn test_events_empty() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + assert!(events_read_all(flow_dir).unwrap().is_empty()); + assert!(events_read_by_stream(flow_dir, "nope").unwrap().is_empty()); + } + + // ── Pipeline tests ───────────────────────────────────────────── + + #[test] + fn test_pipeline_read_write() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + assert_eq!(pipeline_read(flow_dir, "fn-1").unwrap(), None); + + pipeline_write(flow_dir, "fn-1", "plan").unwrap(); + assert_eq!(pipeline_read(flow_dir, "fn-1").unwrap().as_deref(), Some("plan")); 
+ + pipeline_write(flow_dir, "fn-1", "work").unwrap(); + assert_eq!(pipeline_read(flow_dir, "fn-1").unwrap().as_deref(), Some("work")); + + pipeline_write(flow_dir, "fn-2", "plan").unwrap(); + assert_eq!(pipeline_read(flow_dir, "fn-2").unwrap().as_deref(), Some("plan")); + assert_eq!(pipeline_read(flow_dir, "fn-1").unwrap().as_deref(), Some("work")); + } + + // ── Phases tests ─────────────────────────────────────────────── + + #[test] + fn test_phases_mark_and_read() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + assert!(phases_completed(flow_dir, "t1").unwrap().is_empty()); + + phase_mark_done(flow_dir, "t1", "1").unwrap(); + phase_mark_done(flow_dir, "t1", "2").unwrap(); + phase_mark_done(flow_dir, "t1", "2").unwrap(); // duplicate — no-op + + let completed = phases_completed(flow_dir, "t1").unwrap(); + assert_eq!(completed, vec!["1", "2"]); + } + + #[test] + fn test_phases_reset() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + phase_mark_done(flow_dir, "t1", "1").unwrap(); + phase_mark_done(flow_dir, "t1", "5").unwrap(); + phase_mark_done(flow_dir, "t2", "1").unwrap(); + + phases_reset(flow_dir, "t1").unwrap(); + assert!(phases_completed(flow_dir, "t1").unwrap().is_empty()); + assert_eq!(phases_completed(flow_dir, "t2").unwrap(), vec!["1"]); + } + + // ── Locks tests ──────────────────────────────────────────────── + + #[test] + fn test_locks_acquire_read_release() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + assert!(locks_read(flow_dir).unwrap().is_empty()); + + lock_acquire(flow_dir, "src/a.rs", "t1", "write").unwrap(); + lock_acquire(flow_dir, "src/b.rs", "t1", "read").unwrap(); + lock_acquire(flow_dir, "src/c.rs", "t2", "write").unwrap(); + + let all = locks_read(flow_dir).unwrap(); + assert_eq!(all.len(), 3); + + let released = lock_release_task(flow_dir, "t1").unwrap(); + assert_eq!(released, 2); + + let remaining = locks_read(flow_dir).unwrap(); + assert_eq!(remaining.len(), 1); 
+ assert_eq!(remaining[0].task_id, "t2"); + } + + #[test] + fn test_locks_clear() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + lock_acquire(flow_dir, "a", "t1", "write").unwrap(); + lock_acquire(flow_dir, "b", "t2", "read").unwrap(); + + let cleared = locks_clear(flow_dir).unwrap(); + assert_eq!(cleared, 2); + assert!(locks_read(flow_dir).unwrap().is_empty()); + } + + #[test] + fn test_lock_acquire_idempotent() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + lock_acquire(flow_dir, "a", "t1", "write").unwrap(); + lock_acquire(flow_dir, "a", "t1", "read").unwrap(); // re-lock same file+task + + let locks = locks_read(flow_dir).unwrap(); + assert_eq!(locks.len(), 1); + assert_eq!(locks[0].mode, "read"); + } + + // ── Memory tests ─────────────────────────────────────────────── + + #[test] + fn test_memory_append_and_search() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + memory_append(flow_dir, r#"{"text":"Rust is great"}"#).unwrap(); + memory_append(flow_dir, r#"{"text":"Python is also nice"}"#).unwrap(); + memory_append(flow_dir, r#"{"text":"rust patterns"}"#).unwrap(); + + let all = memory_read_all(flow_dir).unwrap(); + assert_eq!(all.len(), 3); + + let found = memory_search_text(flow_dir, "rust").unwrap(); + assert_eq!(found.len(), 2); + + let none = memory_search_text(flow_dir, "javascript").unwrap(); + assert!(none.is_empty()); + } + + // ── Approvals tests ──────────────────────────────────────────── + + #[test] + fn test_approvals_round_trip() { + let tmp = TempDir::new().unwrap(); + let flow_dir = tmp.path(); + + assert!(approvals_read(flow_dir).unwrap().is_empty()); + + let approvals = vec![ + serde_json::json!({"reviewer": "alice", "status": "approved"}), + serde_json::json!({"reviewer": "bob", "status": "needs_work"}), + ]; + approvals_write(flow_dir, &approvals).unwrap(); + + let read_back = approvals_read(flow_dir).unwrap(); + assert_eq!(read_back.len(), 2); + 
assert_eq!(read_back[0]["reviewer"], "alice"); + assert_eq!(read_back[1]["status"], "needs_work"); } } diff --git a/flowctl/crates/flowctl-db/Cargo.toml b/flowctl/crates/flowctl-db/Cargo.toml index f6820a63..b2e6f8ad 100644 --- a/flowctl/crates/flowctl-db/Cargo.toml +++ b/flowctl/crates/flowctl-db/Cargo.toml @@ -1,31 +1,19 @@ [package] name = "flowctl-db" version = "0.1.0" -description = "Async libSQL storage layer for flowctl (successor to flowctl-db)" +description = "Sync file-based storage layer for flowctl" edition.workspace = true rust-version.workspace = true license.workspace = true [dependencies] flowctl-core = { workspace = true } -libsql = { workspace = true } -tokio = { workspace = true } -fastembed = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -thiserror = { workspace = true } chrono = { workspace = true } -nix = { workspace = true } -tracing = { workspace = true } [lints] workspace = true [dev-dependencies] tempfile = "3" -criterion = { version = "0.5", features = ["async_tokio"] } -tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } - -[[bench]] -name = "event_store" -harness = false diff --git a/flowctl/crates/flowctl-db/benches/event_store.rs b/flowctl/crates/flowctl-db/benches/event_store.rs deleted file mode 100644 index 7510548f..00000000 --- a/flowctl/crates/flowctl-db/benches/event_store.rs +++ /dev/null @@ -1,68 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; -use flowctl_core::events::{EpicEvent, EventMetadata, FlowEvent}; -use flowctl_db::pool::open_memory_async; -use flowctl_db::repo::EventStoreRepo; - -fn test_metadata() -> EventMetadata { - EventMetadata { - actor: "bench".into(), - source_cmd: "bench".into(), - session_id: "bench-sess".into(), - timestamp: None, - } -} - -fn bench_append(c: &mut Criterion) { - let rt = tokio::runtime::Runtime::new().unwrap(); - - c.bench_function("event_store_append", |b| { - b.iter(|| { - rt.block_on(async { - let (_db, 
conn) = open_memory_async().await.unwrap(); - let repo = EventStoreRepo::new(conn); - for _ in 0..10 { - repo.append( - "epic:bench-1", - &FlowEvent::Epic(EpicEvent::Created), - &test_metadata(), - ) - .await - .unwrap(); - } - }); - }); - }); -} - -fn bench_query_stream(c: &mut Criterion) { - let rt = tokio::runtime::Runtime::new().unwrap(); - - // Pre-populate the DB outside the benchmark loop. - let (db, conn) = rt.block_on(async { open_memory_async().await.unwrap() }); - rt.block_on(async { - let repo = EventStoreRepo::new(conn.clone()); - for _ in 0..100 { - repo.append( - "epic:bench-q", - &FlowEvent::Epic(EpicEvent::PlanWritten), - &test_metadata(), - ) - .await - .unwrap(); - } - }); - - c.bench_function("event_store_query_stream_100", |b| { - b.iter(|| { - rt.block_on(async { - let repo = EventStoreRepo::new(conn.clone()); - let _events = repo.query_stream("epic:bench-q").await.unwrap(); - }); - }); - }); - - drop(db); -} - -criterion_group!(benches, bench_append, bench_query_stream); -criterion_main!(benches); diff --git a/flowctl/crates/flowctl-db/src/approvals.rs b/flowctl/crates/flowctl-db/src/approvals.rs new file mode 100644 index 00000000..8d4741fe --- /dev/null +++ b/flowctl/crates/flowctl-db/src/approvals.rs @@ -0,0 +1,52 @@ +//! Approval store — delegates to `json_store::approvals_*`. + +use std::path::Path; + +use crate::error::DbError; + +/// Sync approval store backed by `.state/approvals.json`. +pub struct ApprovalStore<'a> { + flow_dir: &'a Path, +} + +impl<'a> ApprovalStore<'a> { + pub fn new(flow_dir: &'a Path) -> Self { + Self { flow_dir } + } + + /// Read all approval records. + pub fn read(&self) -> Result, DbError> { + let approvals = flowctl_core::json_store::approvals_read(self.flow_dir)?; + Ok(approvals) + } + + /// Write approval records (atomic replacement). 
+ pub fn write(&self, approvals: &[serde_json::Value]) -> Result<(), DbError> { + flowctl_core::json_store::approvals_write(self.flow_dir, approvals)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn approvals_round_trip() { + let tmp = TempDir::new().unwrap(); + let store = ApprovalStore::new(tmp.path()); + + assert!(store.read().unwrap().is_empty()); + + let approvals = vec![ + serde_json::json!({"reviewer": "alice", "status": "approved"}), + serde_json::json!({"reviewer": "bob", "status": "needs_work"}), + ]; + store.write(&approvals).unwrap(); + + let read_back = store.read().unwrap(); + assert_eq!(read_back.len(), 2); + assert_eq!(read_back[0]["reviewer"], "alice"); + } +} diff --git a/flowctl/crates/flowctl-db/src/error.rs b/flowctl/crates/flowctl-db/src/error.rs index babdd048..b47b8c64 100644 --- a/flowctl/crates/flowctl-db/src/error.rs +++ b/flowctl/crates/flowctl-db/src/error.rs @@ -1,27 +1,51 @@ -//! Error types for the libSQL storage layer. +//! Error types for the file-based storage layer. -use thiserror::Error; +use std::fmt; -#[derive(Error, Debug)] +/// Unified error type for flowctl-db operations. +#[derive(Debug)] pub enum DbError { - #[error("libsql error: {0}")] - LibSql(#[from] libsql::Error), + /// Wraps a `json_store::StoreError`. + Store(flowctl_core::json_store::StoreError), - #[error("state directory error: {0}")] - StateDir(String), + /// Serialization / deserialization error. + Serialize(serde_json::Error), - #[error("schema error: {0}")] - Schema(String), - - #[error("serialization error: {0}")] - Serialize(#[from] serde_json::Error), - - #[error("not found: {0}")] + /// Entity not found. NotFound(String), - #[error("constraint violation: {0}")] + /// Constraint violation (e.g. file lock conflict). Constraint(String), - #[error("invalid input: {0}")] + /// Invalid input. 
InvalidInput(String), } + +impl fmt::Display for DbError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Store(e) => write!(f, "store error: {e}"), + Self::Serialize(e) => write!(f, "serialization error: {e}"), + Self::NotFound(msg) => write!(f, "not found: {msg}"), + Self::Constraint(msg) => write!(f, "constraint violation: {msg}"), + Self::InvalidInput(msg) => write!(f, "invalid input: {msg}"), + } + } +} + +impl std::error::Error for DbError {} + +impl From for DbError { + fn from(e: flowctl_core::json_store::StoreError) -> Self { + match e { + flowctl_core::json_store::StoreError::NotFound(msg) => Self::NotFound(msg), + other => Self::Store(other), + } + } +} + +impl From for DbError { + fn from(e: serde_json::Error) -> Self { + Self::Serialize(e) + } +} diff --git a/flowctl/crates/flowctl-db/src/events.rs b/flowctl/crates/flowctl-db/src/events.rs index 445b2635..428e7c9b 100644 --- a/flowctl/crates/flowctl-db/src/events.rs +++ b/flowctl/crates/flowctl-db/src/events.rs @@ -1,352 +1,63 @@ -//! Extended event logging: query events by type/timerange, record token usage. -//! -//! Ported from `flowctl-db::events` to async libSQL. All methods take -//! an owned `libsql::Connection` (cheap Clone) and are async. +//! Event store — delegates to `json_store::events_*`. -use libsql::{params, Connection}; +use std::path::Path; use crate::error::DbError; -use crate::repo::EventRow; -/// Token usage record for a task/phase. -pub struct TokenRecord<'a> { - pub epic_id: &'a str, - pub task_id: Option<&'a str>, - pub phase: Option<&'a str>, - pub model: Option<&'a str>, - pub input_tokens: i64, - pub output_tokens: i64, - pub cache_read: i64, - pub cache_write: i64, - pub estimated_cost: Option, +/// Sync event store backed by `.state/events.jsonl`. +pub struct EventStore<'a> { + flow_dir: &'a Path, } -/// A row from the token_usage table. 
-#[derive(Debug, Clone, serde::Serialize)] -pub struct TokenUsageRow { - pub id: i64, - pub timestamp: String, - pub epic_id: String, - pub task_id: Option, - pub phase: Option, - pub model: Option, - pub input_tokens: i64, - pub output_tokens: i64, - pub cache_read: i64, - pub cache_write: i64, - pub estimated_cost: Option, -} - -/// Aggregated token usage for a single task. -#[derive(Debug, Clone, serde::Serialize)] -pub struct TaskTokenSummary { - pub task_id: String, - pub input_tokens: i64, - pub output_tokens: i64, - pub cache_read: i64, - pub cache_write: i64, - pub estimated_cost: f64, -} - -/// Extended async event queries beyond the basic EventRepo. -pub struct EventLog { - conn: Connection, -} - -impl EventLog { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Query events by type, optionally filtered by epic and time range. - pub async fn query( - &self, - event_type: Option<&str>, - epic_id: Option<&str>, - since: Option<&str>, - until: Option<&str>, - limit: usize, - ) -> Result, DbError> { - let mut conditions = Vec::new(); - let mut param_values: Vec = Vec::new(); - - if let Some(et) = event_type { - param_values.push(et.to_string()); - conditions.push(format!("event_type = ?{}", param_values.len())); - } - if let Some(eid) = epic_id { - param_values.push(eid.to_string()); - conditions.push(format!("epic_id = ?{}", param_values.len())); - } - if let Some(s) = since { - param_values.push(s.to_string()); - conditions.push(format!("timestamp >= ?{}", param_values.len())); - } - if let Some(u) = until { - param_values.push(u.to_string()); - conditions.push(format!("timestamp <= ?{}", param_values.len())); - } - - let where_clause = if conditions.is_empty() { - String::new() - } else { - format!("WHERE {}", conditions.join(" AND ")) - }; - - let sql = format!( - "SELECT id, timestamp, epic_id, task_id, event_type, actor, payload, session_id - FROM events {where_clause} ORDER BY id DESC LIMIT ?{}", - param_values.len() + 1 - ); - - // 
Build libsql Params: Vec - let mut values: Vec = param_values - .into_iter() - .map(libsql::Value::Text) - .collect(); - values.push(libsql::Value::Integer(limit as i64)); - - let mut rows = self - .conn - .query(&sql, libsql::params::Params::Positional(values)) - .await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(EventRow { - id: row.get::(0)?, - timestamp: row.get::(1)?, - epic_id: row.get::(2)?, - task_id: row.get::>(3)?, - event_type: row.get::(4)?, - actor: row.get::>(5)?, - payload: row.get::>(6)?, - session_id: row.get::>(7)?, - }); - } - Ok(out) +impl<'a> EventStore<'a> { + pub fn new(flow_dir: &'a Path) -> Self { + Self { flow_dir } } - /// Shortcut: query events by type. - pub async fn query_by_type( - &self, - event_type: &str, - limit: usize, - ) -> Result, DbError> { - self.query(Some(event_type), None, None, None, limit).await + /// Append a JSON event line to the event log. + pub fn append(&self, event_json: &str) -> Result<(), DbError> { + flowctl_core::json_store::events_append(self.flow_dir, event_json)?; + Ok(()) } - /// Record token usage for a task/phase. Returns the inserted row id. - pub async fn record_token_usage(&self, rec: &TokenRecord<'_>) -> Result { - self.conn.execute( - "INSERT INTO token_usage (epic_id, task_id, phase, model, input_tokens, output_tokens, cache_read, cache_write, estimated_cost) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)", - params![ - rec.epic_id.to_string(), - rec.task_id.map(std::string::ToString::to_string), - rec.phase.map(std::string::ToString::to_string), - rec.model.map(std::string::ToString::to_string), - rec.input_tokens, - rec.output_tokens, - rec.cache_read, - rec.cache_write, - rec.estimated_cost, - ], - ).await?; - Ok(self.conn.last_insert_rowid()) + /// Read all event lines from the log. 
+ pub fn read_all(&self) -> Result, DbError> { + let lines = flowctl_core::json_store::events_read_all(self.flow_dir)?; + Ok(lines) } - /// Get all token records for a specific task. - pub async fn tokens_by_task(&self, task_id: &str) -> Result, DbError> { - let mut rows = self.conn.query( - "SELECT id, timestamp, epic_id, task_id, phase, model, input_tokens, output_tokens, cache_read, cache_write, estimated_cost - FROM token_usage WHERE task_id = ?1 ORDER BY id ASC", - params![task_id.to_string()], - ).await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(TokenUsageRow { - id: row.get::(0)?, - timestamp: row.get::(1)?, - epic_id: row.get::(2)?, - task_id: row.get::>(3)?, - phase: row.get::>(4)?, - model: row.get::>(5)?, - input_tokens: row.get::(6)?, - output_tokens: row.get::(7)?, - cache_read: row.get::(8)?, - cache_write: row.get::(9)?, - estimated_cost: row.get::>(10)?, - }); - } - Ok(out) - } - - /// Get aggregated token usage per task for an epic. - pub async fn tokens_by_epic(&self, epic_id: &str) -> Result, DbError> { - let mut rows = self.conn.query( - "SELECT task_id, COALESCE(SUM(input_tokens), 0), COALESCE(SUM(output_tokens), 0), - COALESCE(SUM(cache_read), 0), COALESCE(SUM(cache_write), 0), - COALESCE(SUM(estimated_cost), 0.0) - FROM token_usage WHERE epic_id = ?1 AND task_id IS NOT NULL - GROUP BY task_id ORDER BY task_id", - params![epic_id.to_string()], - ).await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(TaskTokenSummary { - task_id: row.get::(0)?, - input_tokens: row.get::(1)?, - output_tokens: row.get::(2)?, - cache_read: row.get::(3)?, - cache_write: row.get::(4)?, - estimated_cost: row.get::(5)?, - }); - } - Ok(out) - } - - /// Count events by type for an epic. 
- pub async fn count_by_type(&self, epic_id: &str) -> Result, DbError> { - let mut rows = self.conn.query( - "SELECT event_type, COUNT(*) FROM events WHERE epic_id = ?1 GROUP BY event_type ORDER BY COUNT(*) DESC", - params![epic_id.to_string()], - ).await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push((row.get::(0)?, row.get::(1)?)); - } - Ok(out) + /// Read events filtered by stream_id. + pub fn read_by_stream(&self, stream_id: &str) -> Result, DbError> { + let lines = flowctl_core::json_store::events_read_by_stream(self.flow_dir, stream_id)?; + Ok(lines) } } #[cfg(test)] mod tests { use super::*; - use crate::pool::open_memory_async; - use crate::repo::EventRepo; - use libsql::Database; - - async fn setup() -> (Database, Connection) { - let (db, conn) = open_memory_async().await.expect("in-memory db"); - conn.execute( - "INSERT INTO epics (id, title, status, file_path, created_at, updated_at) - VALUES ('fn-1-test', 'Test', 'open', 'e.md', '2025-01-01T00:00:00Z', '2025-01-01T00:00:00Z')", - (), - ).await.unwrap(); - (db, conn) - } + use tempfile::TempDir; - #[tokio::test] - async fn test_query_by_type() { - let (_db, conn) = setup().await; - let repo = EventRepo::new(conn.clone()); - repo.insert("fn-1-test", Some("fn-1-test.1"), "task_started", Some("w"), None, None).await.unwrap(); - repo.insert("fn-1-test", Some("fn-1-test.1"), "task_completed", Some("w"), None, None).await.unwrap(); - repo.insert("fn-1-test", Some("fn-1-test.2"), "task_started", Some("w"), None, None).await.unwrap(); + #[test] + fn append_and_read() { + let tmp = TempDir::new().unwrap(); + let store = EventStore::new(tmp.path()); - let log = EventLog::new(conn.clone()); - let started = log.query(Some("task_started"), None, None, None, 100).await.unwrap(); - assert_eq!(started.len(), 2); + store.append(r#"{"stream_id":"s1","type":"created"}"#).unwrap(); + store.append(r#"{"stream_id":"s2","type":"updated"}"#).unwrap(); + 
store.append(r#"{"stream_id":"s1","type":"done"}"#).unwrap(); - let completed = log.query(Some("task_completed"), Some("fn-1-test"), None, None, 100).await.unwrap(); - assert_eq!(completed.len(), 1); + let all = store.read_all().unwrap(); + assert_eq!(all.len(), 3); - let all = log.query_by_type("task_started", 10).await.unwrap(); - assert_eq!(all.len(), 2); + let s1 = store.read_by_stream("s1").unwrap(); + assert_eq!(s1.len(), 2); } - #[tokio::test] - async fn test_record_token_usage() { - let (_db, conn) = setup().await; - let log = EventLog::new(conn.clone()); - let id = log.record_token_usage(&TokenRecord { - epic_id: "fn-1-test", - task_id: Some("fn-1-test.1"), - phase: Some("impl"), - model: Some("claude-sonnet-4-20250514"), - input_tokens: 1000, - output_tokens: 500, - cache_read: 200, - cache_write: 100, - estimated_cost: Some(0.015), - }).await.unwrap(); - assert!(id > 0); - - let mut rows = conn.query( - "SELECT SUM(input_tokens + output_tokens) FROM token_usage WHERE epic_id = 'fn-1-test'", - (), - ).await.unwrap(); - let row = rows.next().await.unwrap().unwrap(); - let total: i64 = row.get(0).unwrap(); - assert_eq!(total, 1500); - } - - #[tokio::test] - async fn test_count_by_type() { - let (_db, conn) = setup().await; - let repo = EventRepo::new(conn.clone()); - repo.insert("fn-1-test", None, "task_started", None, None, None).await.unwrap(); - repo.insert("fn-1-test", None, "task_started", None, None, None).await.unwrap(); - repo.insert("fn-1-test", None, "task_completed", None, None, None).await.unwrap(); - - let log = EventLog::new(conn); - let counts = log.count_by_type("fn-1-test").await.unwrap(); - assert_eq!(counts.len(), 2); - assert_eq!(counts[0], ("task_started".to_string(), 2)); - assert_eq!(counts[1], ("task_completed".to_string(), 1)); - } - - #[tokio::test] - async fn test_tokens_by_task_and_epic() { - let (_db, conn) = setup().await; - let log = EventLog::new(conn); - log.record_token_usage(&TokenRecord { - epic_id: "fn-1-test", - 
task_id: Some("fn-1-test.1"), - phase: Some("impl"), - model: None, - input_tokens: 1000, - output_tokens: 500, - cache_read: 100, - cache_write: 50, - estimated_cost: Some(0.015), - }).await.unwrap(); - log.record_token_usage(&TokenRecord { - epic_id: "fn-1-test", - task_id: Some("fn-1-test.1"), - phase: Some("review"), - model: None, - input_tokens: 800, - output_tokens: 300, - cache_read: 0, - cache_write: 0, - estimated_cost: Some(0.010), - }).await.unwrap(); - log.record_token_usage(&TokenRecord { - epic_id: "fn-1-test", - task_id: Some("fn-1-test.2"), - phase: Some("impl"), - model: None, - input_tokens: 500, - output_tokens: 200, - cache_read: 0, - cache_write: 0, - estimated_cost: Some(0.005), - }).await.unwrap(); - - let t1_rows = log.tokens_by_task("fn-1-test.1").await.unwrap(); - assert_eq!(t1_rows.len(), 2); - assert_eq!(t1_rows[0].input_tokens, 1000); - - let summaries = log.tokens_by_epic("fn-1-test").await.unwrap(); - assert_eq!(summaries.len(), 2); - let t1 = summaries.iter().find(|s| s.task_id == "fn-1-test.1").unwrap(); - assert_eq!(t1.input_tokens, 1800); - assert_eq!(t1.output_tokens, 800); - assert!((t1.estimated_cost - 0.025).abs() < 0.001); + #[test] + fn empty_returns_empty() { + let tmp = TempDir::new().unwrap(); + let store = EventStore::new(tmp.path()); + assert!(store.read_all().unwrap().is_empty()); } } diff --git a/flowctl/crates/flowctl-db/src/gaps.rs b/flowctl/crates/flowctl-db/src/gaps.rs new file mode 100644 index 00000000..bf87d1f3 --- /dev/null +++ b/flowctl/crates/flowctl-db/src/gaps.rs @@ -0,0 +1,68 @@ +//! Gap store — delegates to `json_store::gaps_*`. + +use std::path::Path; + +use crate::error::DbError; + +// Re-export the GapEntry type from json_store. +pub use flowctl_core::json_store::GapEntry; + +/// Sync gap store backed by `gaps/.json`. +pub struct GapStore<'a> { + flow_dir: &'a Path, +} + +impl<'a> GapStore<'a> { + pub fn new(flow_dir: &'a Path) -> Self { + Self { flow_dir } + } + + /// Read gaps for an epic. 
+ pub fn read(&self, epic_id: &str) -> Result, DbError> { + let gaps = flowctl_core::json_store::gaps_read(self.flow_dir, epic_id)?; + Ok(gaps) + } + + /// Write gaps for an epic (atomic replacement). + pub fn write(&self, epic_id: &str, gaps: &[GapEntry]) -> Result<(), DbError> { + flowctl_core::json_store::gaps_write(self.flow_dir, epic_id, gaps)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn gaps_round_trip() { + let tmp = TempDir::new().unwrap(); + let store = GapStore::new(tmp.path()); + + assert!(store.read("fn-1").unwrap().is_empty()); + + let gaps = vec![ + GapEntry { + id: 1, + capability: "auth".into(), + priority: "required".into(), + source: "test".into(), + resolved: false, + }, + GapEntry { + id: 2, + capability: "logging".into(), + priority: "nice-to-have".into(), + source: "test".into(), + resolved: true, + }, + ]; + store.write("fn-1", &gaps).unwrap(); + + let read_back = store.read("fn-1").unwrap(); + assert_eq!(read_back.len(), 2); + assert_eq!(read_back[0].capability, "auth"); + assert!(read_back[1].resolved); + } +} diff --git a/flowctl/crates/flowctl-db/src/indexer.rs b/flowctl/crates/flowctl-db/src/indexer.rs deleted file mode 100644 index e9211733..00000000 --- a/flowctl/crates/flowctl-db/src/indexer.rs +++ /dev/null @@ -1,521 +0,0 @@ -//! Async reindex engine (port of flowctl-db::indexer for libSQL). -//! -//! Scans `.flow/` Markdown/JSON and rebuilds index tables via async -//! libSQL calls. Idempotent: running twice produces the same result. - -use std::collections::HashMap; -use std::fs; -use std::path::{Path, PathBuf}; - -use libsql::{params, Connection}; -use tracing::{info, warn}; - -use flowctl_core::frontmatter; -use flowctl_core::id::{is_epic_id, is_task_id}; -use flowctl_core::types::{Epic, Task}; - -use crate::error::DbError; -use crate::repo::{EpicRepo, TaskRepo}; - -/// Result of a reindex operation. 
-#[derive(Debug, Default)] -pub struct ReindexResult { - pub epics_indexed: usize, - pub tasks_indexed: usize, - pub files_skipped: usize, - pub runtime_states_migrated: usize, - pub warnings: Vec, -} - -/// Perform a full reindex of `.flow/` Markdown files into libSQL. -pub async fn reindex( - conn: &Connection, - flow_dir: &Path, - state_dir: Option<&Path>, -) -> Result { - let mut result = ReindexResult::default(); - - // libSQL doesn't currently support BEGIN EXCLUSIVE; use BEGIN. - conn.execute_batch("BEGIN").await?; - - let outcome = reindex_inner(conn, flow_dir, state_dir, &mut result).await; - - match outcome { - Ok(()) => { - conn.execute_batch("COMMIT").await?; - info!( - epics = result.epics_indexed, - tasks = result.tasks_indexed, - skipped = result.files_skipped, - runtime = result.runtime_states_migrated, - "reindex complete" - ); - Ok(result) - } - Err(e) => { - let _ = conn.execute_batch("ROLLBACK").await; - Err(e) - } - } -} - -async fn reindex_inner( - conn: &Connection, - flow_dir: &Path, - state_dir: Option<&Path>, - result: &mut ReindexResult, -) -> Result<(), DbError> { - disable_triggers(conn).await?; - clear_indexed_tables(conn).await?; - - let epics_dir = flow_dir.join("epics"); - let indexed_epics = if epics_dir.is_dir() { - index_epics(conn, &epics_dir, result).await? 
- } else { - HashMap::new() - }; - - let tasks_dir = flow_dir.join("tasks"); - if tasks_dir.is_dir() { - index_tasks(conn, &tasks_dir, &indexed_epics, result).await?; - } - - if let Some(sd) = state_dir { - migrate_runtime_state(conn, sd, result).await?; - } - - enable_triggers(conn).await?; - Ok(()) -} - -async fn disable_triggers(conn: &Connection) -> Result<(), DbError> { - conn.execute_batch("DROP TRIGGER IF EXISTS trg_daily_rollup;") - .await?; - Ok(()) -} - -async fn enable_triggers(conn: &Connection) -> Result<(), DbError> { - conn.execute_batch( - "CREATE TRIGGER IF NOT EXISTS trg_daily_rollup AFTER INSERT ON events - WHEN NEW.event_type IN ('task_completed', 'task_failed', 'task_started') - BEGIN - INSERT INTO daily_rollup (day, epic_id, tasks_completed, tasks_failed, tasks_started) - VALUES (DATE(NEW.timestamp), NEW.epic_id, - CASE WHEN NEW.event_type = 'task_completed' THEN 1 ELSE 0 END, - CASE WHEN NEW.event_type = 'task_failed' THEN 1 ELSE 0 END, - CASE WHEN NEW.event_type = 'task_started' THEN 1 ELSE 0 END) - ON CONFLICT(day, epic_id) DO UPDATE SET - tasks_completed = tasks_completed + - CASE WHEN NEW.event_type = 'task_completed' THEN 1 ELSE 0 END, - tasks_failed = tasks_failed + - CASE WHEN NEW.event_type = 'task_failed' THEN 1 ELSE 0 END, - tasks_started = tasks_started + - CASE WHEN NEW.event_type = 'task_started' THEN 1 ELSE 0 END; - END;", - ) - .await?; - Ok(()) -} - -async fn clear_indexed_tables(conn: &Connection) -> Result<(), DbError> { - conn.execute_batch( - "DELETE FROM file_ownership; - DELETE FROM task_deps; - DELETE FROM epic_deps; - DELETE FROM tasks; - DELETE FROM epics;", - ) - .await?; - Ok(()) -} - -async fn index_epics( - conn: &Connection, - epics_dir: &Path, - result: &mut ReindexResult, -) -> Result, DbError> { - let repo = EpicRepo::new(conn.clone()); - let mut seen: HashMap = HashMap::new(); - - // .md files - for path in read_files_with_ext(epics_dir, "md") { - let content = match fs::read_to_string(&path) { - Ok(c) => c, 
- Err(e) => { - let msg = format!("failed to read {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - }; - - let stem = path.file_stem().and_then(|s| s.to_str()).unwrap_or(""); - if !is_epic_id(stem) { - let msg = format!("skipping non-epic file: {}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - - let doc: frontmatter::Document = match frontmatter::parse(&content) { - Ok(d) => d, - Err(e) => { - let msg = format!("invalid frontmatter in {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - }; - let mut epic = doc.frontmatter; - let body = doc.body; - - if let Some(prev_path) = seen.get(&epic.id) { - return Err(DbError::Constraint(format!( - "duplicate epic ID '{}' in {} and {}", - epic.id, - prev_path.display(), - path.display() - ))); - } - - epic.file_path = Some(format!( - "epics/{}", - path.file_name().unwrap().to_string_lossy() - )); - repo.upsert_with_body(&epic, &body).await?; - seen.insert(epic.id.clone(), path.clone()); - result.epics_indexed += 1; - } - - // .json files (Python legacy format) - for path in read_files_with_ext(epics_dir, "json") { - let content = match fs::read_to_string(&path) { - Ok(c) => c, - Err(e) => { - let msg = format!("failed to read {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - }; - - let stem = path.file_stem().and_then(|s| s.to_str()).unwrap_or(""); - if !is_epic_id(stem) { - result.files_skipped += 1; - continue; - } - - if seen.contains_key(stem) { - continue; - } - - let mut epic = match try_parse_json_epic(&content) { - Ok(e) => e, - Err(e) => { - let msg = format!("invalid JSON epic in {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - }; - - epic.file_path = Some(format!( 
- "epics/{}", - path.file_name().unwrap().to_string_lossy() - )); - repo.upsert_with_body(&epic, "").await?; - seen.insert(epic.id.clone(), path.clone()); - result.epics_indexed += 1; - } - - Ok(seen) -} - -async fn index_tasks( - conn: &Connection, - tasks_dir: &Path, - indexed_epics: &HashMap, - result: &mut ReindexResult, -) -> Result<(), DbError> { - let task_repo = TaskRepo::new(conn.clone()); - let mut seen: HashMap = HashMap::new(); - - for path in read_files_with_ext(tasks_dir, "md") { - let content = match fs::read_to_string(&path) { - Ok(c) => c, - Err(e) => { - let msg = format!("failed to read {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - }; - - let stem = path.file_stem().and_then(|s| s.to_str()).unwrap_or(""); - if !is_task_id(stem) { - let msg = format!("skipping non-task file: {}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - - let (mut task, body) = if content.starts_with("---") { - match frontmatter::parse::(&content) { - Ok(doc) => (doc.frontmatter, doc.body), - Err(e) => { - let msg = format!("invalid frontmatter in {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - } - } else { - match try_parse_python_task_md(&content, stem) { - Ok((t, b)) => (t, b), - Err(e) => { - let msg = - format!("cannot parse Python-format task {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - result.files_skipped += 1; - continue; - } - } - }; - - if let Some(prev_path) = seen.get(&task.id) { - return Err(DbError::Constraint(format!( - "duplicate task ID '{}' in {} and {}", - task.id, - prev_path.display(), - path.display() - ))); - } - - if !indexed_epics.contains_key(&task.epic) { - let msg = format!( - "orphan task '{}' references non-existent epic '{}' (indexing anyway)", - task.id, task.epic - ); - warn!("{}", msg); - 
result.warnings.push(msg); - insert_placeholder_epic(conn, &task.epic).await?; - } - - task.file_path = Some(format!( - "tasks/{}", - path.file_name().unwrap().to_string_lossy() - )); - - task_repo.upsert_with_body(&task, &body).await?; - seen.insert(task.id.clone(), path.clone()); - result.tasks_indexed += 1; - } - - Ok(()) -} - -async fn insert_placeholder_epic(conn: &Connection, epic_id: &str) -> Result<(), DbError> { - conn.execute( - "INSERT OR IGNORE INTO epics (id, title, status, file_path, created_at, updated_at) - VALUES (?1, ?2, 'open', '', datetime('now'), datetime('now'))", - params![epic_id.to_string(), format!("[placeholder] {}", epic_id)], - ) - .await?; - Ok(()) -} - -async fn migrate_runtime_state( - conn: &Connection, - state_dir: &Path, - result: &mut ReindexResult, -) -> Result<(), DbError> { - let tasks_state_dir = state_dir.join("tasks"); - if !tasks_state_dir.is_dir() { - return Ok(()); - } - - let entries = match fs::read_dir(&tasks_state_dir) { - Ok(e) => e, - Err(_) => return Ok(()), - }; - - for entry in entries.flatten() { - let path = entry.path(); - let name = match path.file_name().and_then(|n| n.to_str()) { - Some(n) => n.to_string(), - None => continue, - }; - - if !name.ends_with(".state.json") { - continue; - } - - let task_id = name.trim_end_matches(".state.json"); - if !is_task_id(task_id) { - continue; - } - - let content = match fs::read_to_string(&path) { - Ok(c) => c, - Err(e) => { - let msg = format!("failed to read runtime state {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - continue; - } - }; - - let state: serde_json::Value = match serde_json::from_str(&content) { - Ok(v) => v, - Err(e) => { - let msg = format!("invalid JSON in {}: {e}", path.display()); - warn!("{}", msg); - result.warnings.push(msg); - continue; - } - }; - - conn.execute( - "INSERT OR REPLACE INTO runtime_state - (task_id, assignee, claimed_at, completed_at, duration_secs, blocked_reason, baseline_rev, final_rev) - VALUES 
(?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", - params![ - task_id.to_string(), - state.get("assignee").and_then(|v| v.as_str()).map(String::from), - state.get("claimed_at").and_then(|v| v.as_str()).map(String::from), - state.get("completed_at").and_then(|v| v.as_str()).map(String::from), - state - .get("duration_secs") - .or_else(|| state.get("duration_seconds")) - .and_then(serde_json::Value::as_i64), - state.get("blocked_reason").and_then(|v| v.as_str()).map(String::from), - state.get("baseline_rev").and_then(|v| v.as_str()).map(String::from), - state.get("final_rev").and_then(|v| v.as_str()).map(String::from), - ], - ) - .await?; - - result.runtime_states_migrated += 1; - } - - Ok(()) -} - -fn read_files_with_ext(dir: &Path, ext: &str) -> Vec { - let mut files: Vec = match fs::read_dir(dir) { - Ok(entries) => entries - .flatten() - .map(|e| e.path()) - .filter(|p| p.extension().and_then(|e| e.to_str()) == Some(ext)) - .collect(), - Err(_) => Vec::new(), - }; - files.sort(); - files -} - -fn try_parse_json_epic(content: &str) -> Result { - let v: serde_json::Value = serde_json::from_str(content).map_err(|e| e.to_string())?; - let obj = v.as_object().ok_or("not an object")?; - - let id = obj.get("id").and_then(|v| v.as_str()).ok_or("missing id")?; - let title = obj.get("title").and_then(|v| v.as_str()).unwrap_or(id); - let status_str = obj - .get("status") - .and_then(|v| v.as_str()) - .unwrap_or("open"); - let status = match status_str { - "closed" | "done" => flowctl_core::types::EpicStatus::Done, - _ => flowctl_core::types::EpicStatus::Open, - }; - let branch_name = obj - .get("branch_name") - .and_then(|v| v.as_str()) - .map(std::string::ToString::to_string); - let created_at = obj - .get("created_at") - .and_then(|v| v.as_str()) - .and_then(|s| chrono::DateTime::parse_from_rfc3339(s).ok()) - .map(|d| d.with_timezone(&chrono::Utc)) - .unwrap_or_else(chrono::Utc::now); - let updated_at = obj - .get("updated_at") - .and_then(|v| v.as_str()) - .and_then(|s| 
chrono::DateTime::parse_from_rfc3339(s).ok()) - .map(|d| d.with_timezone(&chrono::Utc)) - .unwrap_or(created_at); - - Ok(Epic { - schema_version: 1, - id: id.to_string(), - title: title.to_string(), - status, - branch_name, - plan_review: Default::default(), - completion_review: Default::default(), - depends_on_epics: vec![], - default_impl: None, - default_review: None, - default_sync: None, - auto_execute_pending: None, - auto_execute_set_at: None, - archived: false, - file_path: None, - created_at, - updated_at, - }) -} - -fn try_parse_python_task_md(content: &str, filename_stem: &str) -> Result<(Task, String), String> { - let first_line = content.lines().next().unwrap_or(""); - let title = if first_line.starts_with("# ") { - let after_hash = first_line.trim_start_matches("# "); - after_hash - .split_once(' ') - .map(|x| x.1) - .unwrap_or(filename_stem) - .to_string() - } else { - filename_stem.to_string() - }; - - let epic_id = flowctl_core::id::epic_id_from_task(filename_stem) - .map_err(|e| format!("cannot extract epic from {}: {e}", filename_stem))?; - - let status = if content.contains("## Done summary") && !content.contains("## Done summary\nTBD") - { - flowctl_core::state_machine::Status::Done - } else { - flowctl_core::state_machine::Status::Todo - }; - - let body = content.lines().skip(1).collect::>().join("\n"); - - let task = Task { - schema_version: 1, - id: filename_stem.to_string(), - epic: epic_id, - title, - status, - priority: None, - domain: flowctl_core::types::Domain::General, - depends_on: vec![], - files: vec![], - r#impl: None, - review: None, - sync: None, - file_path: Some(format!("tasks/{}.md", filename_stem)), - created_at: chrono::Utc::now(), - updated_at: chrono::Utc::now(), - }; - Ok((task, body)) -} diff --git a/flowctl/crates/flowctl-db/src/lib.rs b/flowctl/crates/flowctl-db/src/lib.rs index 8f6e460b..a5d38164 100644 --- a/flowctl/crates/flowctl-db/src/lib.rs +++ b/flowctl/crates/flowctl-db/src/lib.rs @@ -1,44 +1,35 @@ -//! 
flowctl-db: Async libSQL storage layer for flowctl. +//! flowctl-db: Sync file-based storage layer for flowctl. //! -//! All DB access is async, Tokio-native. Memory table uses libSQL's native -//! vector column (`F32_BLOB(384)`) for semantic search via `vector_top_k`. +//! All I/O is synchronous, delegating to `flowctl_core::json_store`. +//! No async runtime required — pure synchronous file I/O. //! //! # Architecture //! -//! - **libSQL is the single source of truth.** All reads and writes go -//! through async repository methods. Markdown files are an export format. -//! - **Schema is applied on open** via a single embedded SQL blob, then -//! migrations run to upgrade existing databases (see `migration.rs`). -//! - **Connections are cheap clones.** `libsql::Connection` is `Send + Sync`, -//! pass by value. Do not wrap in `Arc>`. -//! -//! # History -//! -//! This crate was rewritten from rusqlite to libsql in fn-19 (April 2026). -//! The old rusqlite implementation is no longer available. +//! - `FlowStore` is the main entry point, wrapping a `.flow/` directory path. +//! - Sub-stores (`EventStore`, `PipelineStore`, etc.) are accessed via methods. +//! - All data lives as JSON files in the `.flow/` directory tree. 
+pub mod approvals; pub mod error; pub mod events; -pub mod indexer; +pub mod gaps; +pub mod locks; pub mod memory; -pub mod metrics; -pub mod migration; -pub mod pool; -pub mod repo; -pub mod skill; +pub mod phases; +pub mod pipeline; +pub mod store; pub use error::DbError; -pub use indexer::{reindex, ReindexResult}; -pub use events::{EventLog, TaskTokenSummary, TokenRecord, TokenUsageRow}; -pub use memory::{MemoryEntry, MemoryFilter, MemoryRepo}; -pub use metrics::StatsQuery; -pub use skill::{SkillEntry, SkillMatch, SkillRepo}; -pub use pool::{cleanup, open_async, open_memory_async, resolve_db_path, resolve_libsql_path, resolve_state_dir}; -pub use repo::{ - DepRepo, EpicRepo, EventRepo, EventRow, EventStoreRepo, EvidenceRepo, FileLockRepo, - FileOwnershipRepo, GapRepo, GapRow, LockEntry, LockMode, PhaseProgressRepo, RuntimeRepo, - ScoutCacheRepo, StoredEvent, TaskRepo, max_epic_num, max_task_num, -}; +pub use store::FlowStore; + +// Re-export sub-store types for convenience. +pub use approvals::ApprovalStore; +pub use events::EventStore; +pub use gaps::{GapEntry, GapStore}; +pub use locks::{LockEntry, LockStore}; +pub use memory::MemoryStore; +pub use phases::PhaseStore; +pub use pipeline::PipelineStore; -// Re-export libsql types for callers. -pub use libsql::{Connection, Database}; +// Re-export json_store types that callers may need. +pub use flowctl_core::json_store::TaskState; diff --git a/flowctl/crates/flowctl-db/src/locks.rs b/flowctl/crates/flowctl-db/src/locks.rs new file mode 100644 index 00000000..a827f2ad --- /dev/null +++ b/flowctl/crates/flowctl-db/src/locks.rs @@ -0,0 +1,122 @@ +//! File lock store — delegates to `json_store::lock_*` / `locks_*`. + +use std::path::Path; + +use crate::error::DbError; + +// Re-export the LockEntry type from json_store. +pub use flowctl_core::json_store::LockEntry; + +/// Sync lock store backed by `.state/locks.json`. 
+pub struct LockStore<'a> { + flow_dir: &'a Path, +} + +impl<'a> LockStore<'a> { + pub fn new(flow_dir: &'a Path) -> Self { + Self { flow_dir } + } + + /// Acquire a lock on a file for a task. + /// + /// If another task already holds a lock on the file, returns + /// `DbError::Constraint`. + pub fn acquire(&self, file_path: &str, task_id: &str, mode: &str) -> Result<(), DbError> { + // Check for conflict: another task holding the file. + let locks = flowctl_core::json_store::locks_read(self.flow_dir)?; + for lock in &locks { + if lock.file_path == file_path && lock.task_id != task_id { + return Err(DbError::Constraint(format!( + "file '{}' already locked by task '{}'", + file_path, lock.task_id + ))); + } + } + flowctl_core::json_store::lock_acquire(self.flow_dir, file_path, task_id, mode)?; + Ok(()) + } + + /// Check which task holds a lock on a file. + pub fn check(&self, file_path: &str) -> Result, DbError> { + let locks = flowctl_core::json_store::locks_read(self.flow_dir)?; + for lock in &locks { + if lock.file_path == file_path { + return Ok(Some(lock.task_id.clone())); + } + } + Ok(None) + } + + /// Release all locks held by a task. Returns number released. + pub fn release_for_task(&self, task_id: &str) -> Result { + let n = flowctl_core::json_store::lock_release_task(self.flow_dir, task_id)?; + Ok(n) + } + + /// Release all locks. Returns number released. + pub fn release_all(&self) -> Result { + let n = flowctl_core::json_store::locks_clear(self.flow_dir)?; + Ok(n) + } + + /// List all current locks. 
+ pub fn list(&self) -> Result, DbError> { + let locks = flowctl_core::json_store::locks_read(self.flow_dir)?; + Ok(locks) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn acquire_and_check() { + let tmp = TempDir::new().unwrap(); + let store = LockStore::new(tmp.path()); + + store.acquire("src/a.rs", "t1", "write").unwrap(); + assert_eq!(store.check("src/a.rs").unwrap().as_deref(), Some("t1")); + assert!(store.check("src/missing.rs").unwrap().is_none()); + } + + #[test] + fn acquire_conflict() { + let tmp = TempDir::new().unwrap(); + let store = LockStore::new(tmp.path()); + + store.acquire("src/a.rs", "t1", "write").unwrap(); + let err = store.acquire("src/a.rs", "t2", "write").unwrap_err(); + assert!(matches!(err, DbError::Constraint(_))); + } + + #[test] + fn acquire_idempotent() { + let tmp = TempDir::new().unwrap(); + let store = LockStore::new(tmp.path()); + + store.acquire("src/a.rs", "t1", "write").unwrap(); + store.acquire("src/a.rs", "t1", "write").unwrap(); + assert_eq!(store.check("src/a.rs").unwrap().as_deref(), Some("t1")); + } + + #[test] + fn release_for_task_and_all() { + let tmp = TempDir::new().unwrap(); + let store = LockStore::new(tmp.path()); + + store.acquire("src/a.rs", "t1", "write").unwrap(); + store.acquire("src/b.rs", "t1", "write").unwrap(); + store.acquire("src/c.rs", "t2", "write").unwrap(); + + let n = store.release_for_task("t1").unwrap(); + assert_eq!(n, 2); + assert!(store.check("src/a.rs").unwrap().is_none()); + assert_eq!(store.check("src/c.rs").unwrap().as_deref(), Some("t2")); + + let n2 = store.release_all().unwrap(); + assert_eq!(n2, 1); + assert!(store.check("src/c.rs").unwrap().is_none()); + } +} diff --git a/flowctl/crates/flowctl-db/src/memory.rs b/flowctl/crates/flowctl-db/src/memory.rs index d952c739..9355ef97 100644 --- a/flowctl/crates/flowctl-db/src/memory.rs +++ b/flowctl/crates/flowctl-db/src/memory.rs @@ -1,585 +1,68 @@ -//! 
Memory repository with native libSQL vector search. -//! -//! Memory entries carry a 384-dimensional embedding (BGE-small) stored in -//! the native `F32_BLOB(384)` column. Semantic search uses libSQL's -//! `vector_top_k` virtual function against the `memory_emb_idx` index. -//! -//! ## Offline fallback -//! -//! The first call to `get_embedder()` downloads the BGE-small model -//! (~130MB) to a local cache. If that download fails (no network, no -//! disk space) we log a warning and: -//! - `add()` still inserts the row, with embedding left NULL -//! - `search_semantic()` returns `DbError::Schema("embedder unavailable")` -//! -//! Callers should always have `search_literal()` as a fallback path. -//! -//! ## Tests -//! -//! Tests that require the embedder are gated on a successful -//! `test_embedder_loads` check. In CI environments without network -//! access they will report as passing with a warning. See the test -//! module for details. +//! Memory store — delegates to `json_store::memory_*`. -use std::sync::Mutex; - -use fastembed::{EmbeddingModel, InitOptions, TextEmbedding}; -use libsql::{params, Connection}; -use tokio::sync::OnceCell; +use std::path::Path; use crate::error::DbError; -// ── Types ─────────────────────────────────────────────────────────── - -/// A memory entry (pitfall/convention/decision) with optional embedding. 
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct MemoryEntry { - pub id: Option, - pub entry_type: String, - pub content: String, - pub summary: Option, - pub hash: Option, - pub module: Option, - pub severity: Option, - pub problem_type: Option, - pub component: Option, - pub tags: Vec, - pub track: Option, - pub created_at: String, - pub last_verified: Option, - pub refs: u32, -} - -impl Default for MemoryEntry { - fn default() -> Self { - Self { - id: None, - entry_type: "convention".to_string(), - content: String::new(), - summary: None, - hash: None, - module: None, - severity: None, - problem_type: None, - component: None, - tags: Vec::new(), - track: None, - created_at: String::new(), - last_verified: None, - refs: 0, - } - } -} - -/// Filter for `list()` and `search_semantic()` queries. -#[derive(Debug, Clone, Default)] -pub struct MemoryFilter { - pub entry_type: Option, - pub module: Option, - pub track: Option, - pub severity: Option, -} - -// ── Embedder (lazy, shared) ───────────────────────────────────────── - -static EMBEDDER: OnceCell, String>> = OnceCell::const_new(); - -/// Lazily initialize the BGE-small embedder. First call downloads the -/// model (~130MB) via fastembed; subsequent calls return the cached -/// instance. Initialization runs on a blocking thread because fastembed -/// performs synchronous file I/O. -pub(crate) async fn ensure_embedder() -> Result<(), DbError> { - let res = EMBEDDER - .get_or_init(|| async { - match tokio::task::spawn_blocking(|| { - TextEmbedding::try_new(InitOptions::new(EmbeddingModel::BGESmallENV15)) - .map(Mutex::new) - .map_err(|e| format!("fastembed init: {e}")) - }) - .await - { - Ok(inner) => inner, - Err(join_err) => Err(format!("spawn_blocking: {join_err}")), - } - }) - .await; - res.as_ref() - .map(|_| ()) - .map_err(|e| DbError::Schema(format!("embedder unavailable: {e}"))) -} - -/// Embed a single passage into a 384-dim vector. 
-pub(crate) async fn embed_one(text: &str) -> Result, DbError> { - ensure_embedder().await?; - let text = text.to_string(); - let result = tokio::task::spawn_blocking(move || { - let cell = EMBEDDER - .get() - .and_then(|r| r.as_ref().ok()) - .ok_or_else(|| "embedder missing".to_string())?; - let mut emb = cell.lock().map_err(|e| format!("mutex poisoned: {e}"))?; - emb.embed(vec![text], None) - .map_err(|e| format!("embed: {e}")) - }) - .await - .map_err(|e| DbError::Schema(format!("spawn_blocking: {e}")))? - .map_err(DbError::Schema)?; - - result - .into_iter() - .next() - .ok_or_else(|| DbError::Schema("empty embedding result".into())) +/// Sync memory store backed by `memory/entries.jsonl`. +pub struct MemoryStore<'a> { + flow_dir: &'a Path, } -/// Convert a `Vec` into a libSQL `vector32()` literal string. -pub(crate) fn vec_to_literal(v: &[f32]) -> String { - let parts: Vec = v.iter().map(std::string::ToString::to_string).collect(); - format!("[{}]", parts.join(",")) -} - -// ── Repository ────────────────────────────────────────────────────── - -/// Async repository for memory entries + semantic vector search. -pub struct MemoryRepo { - conn: Connection, -} - -impl MemoryRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Insert a memory entry. Auto-generates an embedding from `content` - /// when the embedder is available; otherwise leaves the embedding - /// NULL and logs a warning. Returns the new row id. - /// - /// If `entry.hash` collides with an existing row, returns the - /// existing id (treated as an upsert-style no-op on the insert). - pub async fn add(&self, entry: &MemoryEntry) -> Result { - // Dedup by hash first. - if let Some(ref h) = entry.hash { - let mut rows = self - .conn - .query("SELECT id FROM memory WHERE hash = ?1", params![h.clone()]) - .await?; - if let Some(row) = rows.next().await? 
{ - return Ok(row.get::(0)?); - } - } - - let tags_json = serde_json::to_string(&entry.tags)?; - let created_at = if entry.created_at.is_empty() { - chrono::Utc::now().to_rfc3339() - } else { - entry.created_at.clone() - }; - - self.conn - .execute( - "INSERT INTO memory ( - entry_type, content, summary, hash, module, severity, - problem_type, component, tags, track, created_at, - last_verified, refs - ) VALUES (?1,?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12,?13)", - params![ - entry.entry_type.clone(), - entry.content.clone(), - entry.summary.clone(), - entry.hash.clone(), - entry.module.clone(), - entry.severity.clone(), - entry.problem_type.clone(), - entry.component.clone(), - tags_json, - entry.track.clone(), - created_at, - entry.last_verified.clone(), - entry.refs as i64, - ], - ) - .await?; - - let id = self.conn.last_insert_rowid(); - - // Attempt to embed; swallow failures (NULL embedding is fine). - match embed_one(&entry.content).await { - Ok(vec) => { - let lit = vec_to_literal(&vec); - self.conn - .execute( - "UPDATE memory SET embedding = vector32(?1) WHERE id = ?2", - params![lit, id], - ) - .await?; - } - Err(e) => { - tracing::warn!( - memory_id = id, - error = %e, - "embedder unavailable; memory inserted without embedding" - ); - } - } - - Ok(id) - } - - /// Fetch a single entry by id. - pub async fn get(&self, id: i64) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT id, entry_type, content, summary, hash, module, severity, - problem_type, component, tags, track, created_at, - last_verified, refs - FROM memory WHERE id = ?1", - params![id], - ) - .await?; - match rows.next().await? { - Some(row) => Ok(Some(row_to_entry(&row)?)), - None => Ok(None), - } - } - - /// List entries matching the provided filter. All filter fields are - /// AND-joined; `None` fields are ignored. 
- pub async fn list(&self, filter: MemoryFilter) -> Result, DbError> { - let (where_clause, args) = build_filter_sql(&filter); - let sql = format!( - "SELECT id, entry_type, content, summary, hash, module, severity, - problem_type, component, tags, track, created_at, - last_verified, refs - FROM memory - {where_clause} - ORDER BY created_at DESC" - ); - let mut rows = self.conn.query(&sql, args).await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row_to_entry(&row)?); - } - Ok(out) - } - - /// Substring match on `content`. No embedder required. - pub async fn search_literal( - &self, - query: &str, - limit: usize, - ) -> Result, DbError> { - let pat = format!("%{query}%"); - let mut rows = self - .conn - .query( - "SELECT id, entry_type, content, summary, hash, module, severity, - problem_type, component, tags, track, created_at, - last_verified, refs - FROM memory - WHERE content LIKE ?1 - ORDER BY refs DESC, created_at DESC - LIMIT ?2", - params![pat, limit as i64], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row_to_entry(&row)?); - } - Ok(out) - } - - /// Semantic search via libSQL `vector_top_k`. Returns entries whose - /// embedding is closest to `query`'s embedding. Fails with - /// `DbError::Schema` if the embedder is not available. - pub async fn search_semantic( - &self, - query: &str, - limit: usize, - filter: Option, - ) -> Result, DbError> { - let vec = embed_one(query).await?; - let lit = vec_to_literal(&vec); - - // vector_top_k returns (id, distance) rows; join on rowid. - // Over-fetch when filters are applied so we can still return `limit` matches. 
- let filter = filter.unwrap_or_default(); - let has_filter = filter.entry_type.is_some() - || filter.module.is_some() - || filter.track.is_some() - || filter.severity.is_some(); - let fetch = if has_filter { limit * 4 } else { limit }; - - let mut rows = self - .conn - .query( - "SELECT m.id, m.entry_type, m.content, m.summary, m.hash, m.module, - m.severity, m.problem_type, m.component, m.tags, m.track, - m.created_at, m.last_verified, m.refs - FROM vector_top_k('memory_emb_idx', vector32(?1), ?2) AS top - JOIN memory m ON m.rowid = top.id", - params![lit, fetch as i64], - ) - .await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - let entry = row_to_entry(&row)?; - if passes_filter(&entry, &filter) { - out.push(entry); - if out.len() >= limit { - break; - } - } - } - Ok(out) - } - - /// Delete an entry by id. - pub async fn delete(&self, id: i64) -> Result<(), DbError> { - self.conn - .execute("DELETE FROM memory WHERE id = ?1", params![id]) - .await?; - Ok(()) +impl<'a> MemoryStore<'a> { + pub fn new(flow_dir: &'a Path) -> Self { + Self { flow_dir } } - /// Increment the `refs` counter for an entry. - pub async fn increment_refs(&self, id: i64) -> Result<(), DbError> { - self.conn - .execute( - "UPDATE memory SET refs = refs + 1 WHERE id = ?1", - params![id], - ) - .await?; + /// Append a JSON memory entry. 
+ pub fn append(&self, entry_json: &str) -> Result<(), DbError> { + flowctl_core::json_store::memory_append(self.flow_dir, entry_json)?; Ok(()) } -} - -// ── Row helpers ───────────────────────────────────────────────────── - -fn row_to_entry(row: &libsql::Row) -> Result { - let tags_raw: String = row.get::(9).unwrap_or_else(|_| "[]".to_string()); - let tags: Vec = serde_json::from_str(&tags_raw).unwrap_or_default(); - Ok(MemoryEntry { - id: Some(row.get::(0)?), - entry_type: row.get::(1)?, - content: row.get::(2)?, - summary: row.get::>(3)?, - hash: row.get::>(4)?, - module: row.get::>(5)?, - severity: row.get::>(6)?, - problem_type: row.get::>(7)?, - component: row.get::>(8)?, - tags, - track: row.get::>(10)?, - created_at: row.get::(11)?, - last_verified: row.get::>(12)?, - refs: row.get::(13)? as u32, - }) -} -fn build_filter_sql(f: &MemoryFilter) -> (String, Vec) { - let mut clauses = Vec::new(); - let mut args: Vec = Vec::new(); - let mut i = 1; - if let Some(ref v) = f.entry_type { - clauses.push(format!("entry_type = ?{i}")); - args.push(libsql::Value::Text(v.clone())); - i += 1; + /// Read all memory entries. 
+ pub fn read_all(&self) -> Result, DbError> { + let entries = flowctl_core::json_store::memory_read_all(self.flow_dir)?; + Ok(entries) } - if let Some(ref v) = f.module { - clauses.push(format!("module = ?{i}")); - args.push(libsql::Value::Text(v.clone())); - i += 1; - } - if let Some(ref v) = f.track { - clauses.push(format!("track = ?{i}")); - args.push(libsql::Value::Text(v.clone())); - i += 1; - } - if let Some(ref v) = f.severity { - clauses.push(format!("severity = ?{i}")); - args.push(libsql::Value::Text(v.clone())); - // i += 1; // last binding - } - if clauses.is_empty() { - (String::new(), args) - } else { - (format!("WHERE {}", clauses.join(" AND ")), args) - } -} -fn passes_filter(e: &MemoryEntry, f: &MemoryFilter) -> bool { - if let Some(ref v) = f.entry_type { - if &e.entry_type != v { - return false; - } + /// Search memory entries by case-insensitive substring match. + pub fn search(&self, query: &str) -> Result, DbError> { + let results = flowctl_core::json_store::memory_search_text(self.flow_dir, query)?; + Ok(results) } - if let Some(ref v) = f.module { - if e.module.as_ref() != Some(v) { - return false; - } - } - if let Some(ref v) = f.track { - if e.track.as_ref() != Some(v) { - return false; - } - } - if let Some(ref v) = f.severity { - if e.severity.as_ref() != Some(v) { - return false; - } - } - true } -// ── Tests ─────────────────────────────────────────────────────────── - #[cfg(test)] mod tests { use super::*; - use crate::pool::open_memory_async; + use tempfile::TempDir; - async fn fresh_repo() -> MemoryRepo { - let (_db, conn) = open_memory_async().await.expect("open memory db"); - // Keep db alive for the duration of the test by leaking — the - // test holds conn which references the same underlying store. - // Actually we need to keep Database alive; Box::leak it. 
- let _ = Box::leak(Box::new(_db)); - MemoryRepo::new(conn) - } - - fn sample(content: &str, entry_type: &str) -> MemoryEntry { - MemoryEntry { - entry_type: entry_type.to_string(), - content: content.to_string(), - ..MemoryEntry::default() - } - } + #[test] + fn append_and_read() { + let tmp = TempDir::new().unwrap(); + let store = MemoryStore::new(tmp.path()); - #[tokio::test] - async fn test_add_get_delete_no_embedder() { - // Uses a bogus content; add() will still succeed even if the - // embedder fails because we tolerate missing embeddings. - let repo = fresh_repo().await; - let id = repo - .add(&sample("hello world", "convention")) - .await - .expect("add"); - let fetched = repo.get(id).await.expect("get").expect("some"); - assert_eq!(fetched.content, "hello world"); - assert_eq!(fetched.entry_type, "convention"); + store.append(r#"{"text":"Rust is great"}"#).unwrap(); + store.append(r#"{"text":"Python is also nice"}"#).unwrap(); - repo.delete(id).await.expect("delete"); - assert!(repo.get(id).await.expect("get").is_none()); + let all = store.read_all().unwrap(); + assert_eq!(all.len(), 2); } - #[tokio::test] - async fn test_search_literal() { - let repo = fresh_repo().await; - repo.add(&sample("database migration tooling", "pitfall")) - .await - .unwrap(); - repo.add(&sample("prefer iterators over loops", "convention")) - .await - .unwrap(); + #[test] + fn search_text() { + let tmp = TempDir::new().unwrap(); + let store = MemoryStore::new(tmp.path()); - let results = repo.search_literal("migration", 10).await.unwrap(); - assert_eq!(results.len(), 1); - assert!(results[0].content.contains("migration")); + store.append(r#"{"text":"Rust is great"}"#).unwrap(); + store.append(r#"{"text":"Python is also nice"}"#).unwrap(); + store.append(r#"{"text":"rust patterns"}"#).unwrap(); - let none = repo.search_literal("nonexistent-xyz", 10).await.unwrap(); - assert!(none.is_empty()); - } - - #[tokio::test] - async fn test_list_with_filter() { - let repo = 
fresh_repo().await; - repo.add(&sample("a pitfall", "pitfall")).await.unwrap(); - repo.add(&sample("a convention", "convention")) - .await - .unwrap(); - repo.add(&sample("a decision", "decision")).await.unwrap(); - - let conventions = repo - .list(MemoryFilter { - entry_type: Some("convention".into()), - ..Default::default() - }) - .await - .unwrap(); - assert_eq!(conventions.len(), 1); - assert_eq!(conventions[0].entry_type, "convention"); - - let all = repo.list(MemoryFilter::default()).await.unwrap(); - assert_eq!(all.len(), 3); - } + let found = store.search("rust").unwrap(); + assert_eq!(found.len(), 2); - #[tokio::test] - async fn test_increment_refs() { - let repo = fresh_repo().await; - let id = repo - .add(&sample("refcount test", "convention")) - .await - .unwrap(); - assert_eq!(repo.get(id).await.unwrap().unwrap().refs, 0); - repo.increment_refs(id).await.unwrap(); - repo.increment_refs(id).await.unwrap(); - assert_eq!(repo.get(id).await.unwrap().unwrap().refs, 2); - } - - #[tokio::test] - async fn test_dedup_by_hash() { - let repo = fresh_repo().await; - let mut e = sample("same content", "convention"); - e.hash = Some("abc123".to_string()); - let id1 = repo.add(&e).await.unwrap(); - let id2 = repo.add(&e).await.unwrap(); - assert_eq!(id1, id2, "same hash should return existing id"); - } - - /// Verify the embedder can be loaded. If this test is `ignored` by - /// the user or fails due to network, semantic tests will be gated. - /// Requires ~130MB BGE-small download on first run. - #[tokio::test] - #[ignore = "requires network for ~130MB fastembed model download"] - async fn test_embedder_loads() { - ensure_embedder().await.expect("embedder must load"); - let v = embed_one("hello world").await.expect("embed"); - assert_eq!(v.len(), 384); - } - - /// Semantic search end-to-end. Gated behind `#[ignore]` because the - /// first run downloads the BGE-small model (~130MB). 
- #[tokio::test] - #[ignore = "requires fastembed model (~130MB); run with --ignored"] - async fn test_search_semantic() { - let repo = fresh_repo().await; - repo.add(&sample( - "SQL database performance and query optimization", - "convention", - )) - .await - .unwrap(); - repo.add(&sample( - "React component lifecycle and hooks", - "convention", - )) - .await - .unwrap(); - repo.add(&sample("Rust ownership and borrow checker", "convention")) - .await - .unwrap(); - - let results = repo - .search_semantic("javascript frontend framework", 1, None) - .await - .expect("semantic search"); - assert_eq!(results.len(), 1); - assert!( - results[0].content.contains("React"), - "expected React result, got: {}", - results[0].content - ); + let none = store.search("javascript").unwrap(); + assert!(none.is_empty()); } } diff --git a/flowctl/crates/flowctl-db/src/metrics.rs b/flowctl/crates/flowctl-db/src/metrics.rs deleted file mode 100644 index 82b0ae15..00000000 --- a/flowctl/crates/flowctl-db/src/metrics.rs +++ /dev/null @@ -1,514 +0,0 @@ -//! Stats queries: summary, per-epic, weekly trends, token/cost analysis, -//! bottleneck analysis, DORA metrics, domain duration stats, monthly rollup. -//! -//! Ported from `flowctl-db::metrics` to async libSQL. All methods take an -//! owned `libsql::Connection` (cheap Clone) and are async. - -use libsql::{params, Connection}; -use serde::Serialize; - -use crate::error::DbError; - -/// Overall summary stats. -#[derive(Debug, Serialize)] -pub struct Summary { - pub total_epics: i64, - pub open_epics: i64, - pub total_tasks: i64, - pub done_tasks: i64, - pub in_progress_tasks: i64, - pub blocked_tasks: i64, - pub total_events: i64, - pub total_tokens: i64, - pub total_cost_usd: f64, -} - -/// Per-epic stats row. 
-#[derive(Debug, Serialize)] -pub struct EpicStats { - pub epic_id: String, - pub title: String, - pub status: String, - pub task_count: i64, - pub done_count: i64, - pub avg_duration_secs: Option, - pub total_tokens: i64, - pub total_cost: f64, -} - -/// Weekly trend data point. -#[derive(Debug, Serialize)] -pub struct WeeklyTrend { - pub week: String, - pub tasks_started: i64, - pub tasks_completed: i64, - pub tasks_failed: i64, -} - -/// Token usage breakdown. -#[derive(Debug, Serialize)] -pub struct TokenBreakdown { - pub epic_id: String, - pub model: String, - pub input_tokens: i64, - pub output_tokens: i64, - pub cache_read: i64, - pub cache_write: i64, - pub estimated_cost: f64, -} - -/// Bottleneck: tasks that took longest or were blocked. -#[derive(Debug, Serialize)] -pub struct Bottleneck { - pub task_id: String, - pub epic_id: String, - pub title: String, - pub duration_secs: Option, - pub status: String, - pub blocked_reason: Option, -} - -/// DORA metrics. -#[derive(Debug, Serialize)] -pub struct DoraMetrics { - pub lead_time_hours: Option, - pub throughput_per_week: f64, - pub change_failure_rate: f64, - pub time_to_restore_hours: Option, -} - -/// Per-domain historical duration statistics. -#[derive(Debug, Clone, Serialize)] -pub struct DomainDurationStats { - pub domain: String, - pub completed_count: i64, - pub avg_duration_secs: f64, - pub stddev_duration_secs: f64, -} - -/// Async stats query engine. -pub struct StatsQuery { - conn: Connection, -} - -impl StatsQuery { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - async fn scalar_i64(&self, sql: &str) -> Result { - let mut rows = self.conn.query(sql, ()).await?; - let row = rows - .next() - .await? - .ok_or_else(|| DbError::NotFound("scalar query".into()))?; - Ok(row.get::(0)?) - } - - async fn scalar_f64(&self, sql: &str) -> Result { - let mut rows = self.conn.query(sql, ()).await?; - let row = rows - .next() - .await? 
- .ok_or_else(|| DbError::NotFound("scalar query".into()))?; - Ok(row.get::(0)?) - } - - /// Overall summary across all epics. - pub async fn summary(&self) -> Result { - Ok(Summary { - total_epics: self.scalar_i64("SELECT COUNT(*) FROM epics").await?, - open_epics: self - .scalar_i64("SELECT COUNT(*) FROM epics WHERE status = 'open'") - .await?, - total_tasks: self.scalar_i64("SELECT COUNT(*) FROM tasks").await?, - done_tasks: self - .scalar_i64("SELECT COUNT(*) FROM tasks WHERE status = 'done'") - .await?, - in_progress_tasks: self - .scalar_i64("SELECT COUNT(*) FROM tasks WHERE status = 'in_progress'") - .await?, - blocked_tasks: self - .scalar_i64("SELECT COUNT(*) FROM tasks WHERE status = 'blocked'") - .await?, - total_events: self.scalar_i64("SELECT COUNT(*) FROM events").await?, - total_tokens: self - .scalar_i64( - "SELECT COALESCE(SUM(input_tokens + output_tokens), 0) FROM token_usage", - ) - .await?, - total_cost_usd: self - .scalar_f64("SELECT COALESCE(SUM(estimated_cost), 0.0) FROM token_usage") - .await?, - }) - } - - /// Per-epic stats. - pub async fn epic_stats(&self, epic_id: Option<&str>) -> Result, DbError> { - let mut rows = match epic_id { - Some(id) => { - self.conn.query( - "SELECT e.id, e.title, e.status, - (SELECT COUNT(*) FROM tasks t WHERE t.epic_id = e.id), - (SELECT COUNT(*) FROM tasks t WHERE t.epic_id = e.id AND t.status = 'done'), - (SELECT AVG(rs.duration_secs) FROM runtime_state rs - JOIN tasks t ON t.id = rs.task_id WHERE t.epic_id = e.id AND rs.duration_secs IS NOT NULL), - COALESCE((SELECT SUM(tu.input_tokens + tu.output_tokens) FROM token_usage tu WHERE tu.epic_id = e.id), 0), - COALESCE((SELECT SUM(tu.estimated_cost) FROM token_usage tu WHERE tu.epic_id = e.id), 0.0) - FROM epics e WHERE e.id = ?1", - params![id.to_string()], - ).await? 
- } - None => { - self.conn.query( - "SELECT e.id, e.title, e.status, - (SELECT COUNT(*) FROM tasks t WHERE t.epic_id = e.id), - (SELECT COUNT(*) FROM tasks t WHERE t.epic_id = e.id AND t.status = 'done'), - (SELECT AVG(rs.duration_secs) FROM runtime_state rs - JOIN tasks t ON t.id = rs.task_id WHERE t.epic_id = e.id AND rs.duration_secs IS NOT NULL), - COALESCE((SELECT SUM(tu.input_tokens + tu.output_tokens) FROM token_usage tu WHERE tu.epic_id = e.id), 0), - COALESCE((SELECT SUM(tu.estimated_cost) FROM token_usage tu WHERE tu.epic_id = e.id), 0.0) - FROM epics e ORDER BY e.created_at", - (), - ).await? - } - }; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(EpicStats { - epic_id: row.get::(0)?, - title: row.get::(1)?, - status: row.get::(2)?, - task_count: row.get::(3)?, - done_count: row.get::(4)?, - avg_duration_secs: row.get::>(5)?, - total_tokens: row.get::>(6)?.unwrap_or(0), - total_cost: row.get::>(7)?.unwrap_or(0.0), - }); - } - Ok(out) - } - - /// Weekly trends from daily_rollup (last N weeks). - pub async fn weekly_trends(&self, weeks: u32) -> Result, DbError> { - let offset = format!("-{} days", weeks * 7); - let mut rows = self.conn.query( - "SELECT strftime('%Y-W%W', day) AS week, - SUM(tasks_started), SUM(tasks_completed), SUM(tasks_failed) - FROM daily_rollup - WHERE day >= strftime('%Y-%m-%d', 'now', ?1) - GROUP BY week ORDER BY week", - params![offset], - ).await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(WeeklyTrend { - week: row.get::(0)?, - tasks_started: row.get::>(1)?.unwrap_or(0), - tasks_completed: row.get::>(2)?.unwrap_or(0), - tasks_failed: row.get::>(3)?.unwrap_or(0), - }); - } - Ok(out) - } - - /// Token/cost breakdown by epic and model. 
- pub async fn token_breakdown(&self, epic_id: Option<&str>) -> Result, DbError> { - let mut rows = match epic_id { - Some(id) => { - self.conn.query( - "SELECT epic_id, COALESCE(model, 'unknown'), SUM(input_tokens), SUM(output_tokens), - SUM(cache_read), SUM(cache_write), SUM(estimated_cost) - FROM token_usage WHERE epic_id = ?1 - GROUP BY epic_id, model ORDER BY SUM(estimated_cost) DESC", - params![id.to_string()], - ).await? - } - None => { - self.conn.query( - "SELECT epic_id, COALESCE(model, 'unknown'), SUM(input_tokens), SUM(output_tokens), - SUM(cache_read), SUM(cache_write), SUM(estimated_cost) - FROM token_usage - GROUP BY epic_id, model ORDER BY SUM(estimated_cost) DESC", - (), - ).await? - } - }; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(TokenBreakdown { - epic_id: row.get::(0)?, - model: row.get::(1)?, - input_tokens: row.get::>(2)?.unwrap_or(0), - output_tokens: row.get::>(3)?.unwrap_or(0), - cache_read: row.get::>(4)?.unwrap_or(0), - cache_write: row.get::>(5)?.unwrap_or(0), - estimated_cost: row.get::>(6)?.unwrap_or(0.0), - }); - } - Ok(out) - } - - /// Bottleneck analysis: longest-running and blocked tasks. - pub async fn bottlenecks(&self, limit: usize) -> Result, DbError> { - let mut rows = self.conn.query( - "SELECT t.id, t.epic_id, t.title, rs.duration_secs, t.status, rs.blocked_reason - FROM tasks t - LEFT JOIN runtime_state rs ON rs.task_id = t.id - WHERE t.status IN ('done', 'blocked', 'in_progress') - ORDER BY - CASE WHEN t.status = 'blocked' THEN 0 ELSE 1 END, - rs.duration_secs DESC NULLS LAST - LIMIT ?1", - params![limit as i64], - ).await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(Bottleneck { - task_id: row.get::(0)?, - epic_id: row.get::(1)?, - title: row.get::(2)?, - duration_secs: row.get::>(3)?, - status: row.get::(4)?, - blocked_reason: row.get::>(5)?, - }); - } - Ok(out) - } - - /// DORA-style metrics. 
- pub async fn dora_metrics(&self) -> Result { - // Lead time - let mut rows = self.conn.query( - "SELECT AVG(rs.duration_secs) - FROM runtime_state rs - WHERE rs.completed_at >= strftime('%Y-%m-%dT%H:%M:%fZ', 'now', '-30 days') - AND rs.duration_secs IS NOT NULL", - (), - ).await?; - let lead_time_secs: Option = match rows.next().await? { - Some(row) => row.get::>(0)?, - None => None, - }; - - // Throughput - let mut rows = self.conn.query( - "SELECT CAST(COUNT(*) AS REAL) FROM runtime_state - WHERE completed_at >= strftime('%Y-%m-%dT%H:%M:%fZ', 'now', '-28 days') - AND completed_at IS NOT NULL", - (), - ).await?; - let completed_28d: f64 = match rows.next().await? { - Some(row) => row.get::(0).unwrap_or(0.0), - None => 0.0, - }; - - // Change failure rate - let mut rows = self.conn.query( - "SELECT - COALESCE(SUM(CASE WHEN event_type = 'task_completed' THEN 1 ELSE 0 END), 0), - COALESCE(SUM(CASE WHEN event_type = 'task_failed' THEN 1 ELSE 0 END), 0) - FROM events - WHERE timestamp >= strftime('%Y-%m-%dT%H:%M:%fZ', 'now', '-30 days') - AND event_type IN ('task_completed', 'task_failed')", - (), - ).await?; - let (completed_30d, failed_30d): (f64, f64) = match rows.next().await? { - Some(row) => ( - row.get::(0).unwrap_or(0) as f64, - row.get::(1).unwrap_or(0) as f64, - ), - None => (0.0, 0.0), - }; - - let change_failure_rate = if (completed_30d + failed_30d) > 0.0 { - failed_30d / (completed_30d + failed_30d) - } else { - 0.0 - }; - - // TTR - let mut rows = self.conn.query( - "SELECT AVG(CAST( - (julianday(rs.completed_at) - julianday(rs.claimed_at)) * 86400 AS REAL - )) - FROM runtime_state rs - WHERE rs.blocked_reason IS NOT NULL - AND rs.completed_at IS NOT NULL - AND rs.claimed_at IS NOT NULL", - (), - ).await?; - let ttr_secs: Option = match rows.next().await? 
{ - Some(row) => row.get::>(0)?, - None => None, - }; - - Ok(DoraMetrics { - lead_time_hours: lead_time_secs.map(|s| s / 3600.0), - throughput_per_week: completed_28d / 4.0, - change_failure_rate, - time_to_restore_hours: ttr_secs.map(|s| s / 3600.0), - }) - } - - /// Per-domain duration statistics for completed tasks. - pub async fn domain_duration_stats(&self) -> Result, DbError> { - let mut rows = self.conn.query( - "SELECT t.domain, - COUNT(*) AS cnt, - AVG(rs.duration_secs) AS avg_dur, - AVG(rs.duration_secs * rs.duration_secs) AS avg_sq - FROM tasks t - JOIN runtime_state rs ON rs.task_id = t.id - WHERE t.status = 'done' - AND rs.duration_secs IS NOT NULL - GROUP BY t.domain", - (), - ).await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - let avg: f64 = row.get::(2)?; - let avg_sq: f64 = row.get::(3)?; - let variance = (avg_sq - avg * avg).max(0.0); - out.push(DomainDurationStats { - domain: row.get::(0)?, - completed_count: row.get::(1)?, - avg_duration_secs: avg, - stddev_duration_secs: variance.sqrt(), - }); - } - Ok(out) - } - - /// Generate monthly rollup. 
- pub async fn generate_monthly_rollups(&self) -> Result { - let n = self.conn.execute( - "INSERT OR REPLACE INTO monthly_rollup (month, epics_completed, tasks_completed, avg_lead_time_h, total_tokens, total_cost_usd) - SELECT - strftime('%Y-%m', day) AS month, - COALESCE((SELECT COUNT(*) FROM epics e WHERE e.status = 'done' - AND strftime('%Y-%m', e.updated_at) = strftime('%Y-%m', dr.day)), 0), - SUM(dr.tasks_completed), - COALESCE((SELECT AVG(rs.duration_secs) / 3600.0 FROM runtime_state rs - WHERE rs.completed_at IS NOT NULL - AND strftime('%Y-%m', rs.completed_at) = strftime('%Y-%m', dr.day)), 0), - COALESCE((SELECT SUM(tu.input_tokens + tu.output_tokens) FROM token_usage tu - WHERE strftime('%Y-%m', tu.timestamp) = strftime('%Y-%m', dr.day)), 0), - COALESCE((SELECT SUM(tu.estimated_cost) FROM token_usage tu - WHERE strftime('%Y-%m', tu.timestamp) = strftime('%Y-%m', dr.day)), 0.0) - FROM daily_rollup dr - GROUP BY strftime('%Y-%m', day)", - (), - ).await?; - Ok(n) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::pool::open_memory_async; - use crate::repo::EventRepo; - - async fn setup() -> (libsql::Database, Connection) { - let (db, conn) = open_memory_async().await.expect("in-memory db"); - conn.execute( - "INSERT INTO epics (id, title, status, file_path, created_at, updated_at) - VALUES ('fn-1-test', 'Test Epic', 'open', 'e.md', '2025-01-01T00:00:00Z', '2025-01-01T00:00:00Z')", - (), - ).await.unwrap(); - conn.execute( - "INSERT INTO tasks (id, epic_id, title, status, file_path, created_at, updated_at) - VALUES ('fn-1-test.1', 'fn-1-test', 'Task 1', 'done', 't1.md', '2025-01-01T00:00:00Z', '2025-01-01T00:00:00Z')", - (), - ).await.unwrap(); - conn.execute( - "INSERT INTO tasks (id, epic_id, title, status, file_path, created_at, updated_at) - VALUES ('fn-1-test.2', 'fn-1-test', 'Task 2', 'in_progress', 't2.md', '2025-01-01T00:00:00Z', '2025-01-01T00:00:00Z')", - (), - ).await.unwrap(); - (db, conn) - } - - #[tokio::test] - async fn 
test_summary() { - let (_db, conn) = setup().await; - let stats = StatsQuery::new(conn); - let s = stats.summary().await.unwrap(); - assert_eq!(s.total_epics, 1); - assert_eq!(s.total_tasks, 2); - assert_eq!(s.done_tasks, 1); - assert_eq!(s.in_progress_tasks, 1); - } - - #[tokio::test] - async fn test_epic_stats() { - let (_db, conn) = setup().await; - let stats = StatsQuery::new(conn); - let epics = stats.epic_stats(None).await.unwrap(); - assert_eq!(epics.len(), 1); - assert_eq!(epics[0].task_count, 2); - assert_eq!(epics[0].done_count, 1); - - let one = stats.epic_stats(Some("fn-1-test")).await.unwrap(); - assert_eq!(one.len(), 1); - assert_eq!(one[0].epic_id, "fn-1-test"); - } - - #[tokio::test] - async fn test_weekly_trends() { - let (_db, conn) = setup().await; - let repo = EventRepo::new(conn.clone()); - repo.insert("fn-1-test", Some("fn-1-test.1"), "task_started", None, None, None).await.unwrap(); - repo.insert("fn-1-test", Some("fn-1-test.1"), "task_completed", None, None, None).await.unwrap(); - - let stats = StatsQuery::new(conn); - let trends = stats.weekly_trends(4).await.unwrap(); - assert!(!trends.is_empty()); - assert!(trends[0].tasks_started > 0); - } - - #[tokio::test] - async fn test_token_breakdown() { - let (_db, conn) = setup().await; - conn.execute( - "INSERT INTO token_usage (epic_id, task_id, model, input_tokens, output_tokens, estimated_cost) - VALUES ('fn-1-test', 'fn-1-test.1', 'claude-sonnet-4-20250514', 1000, 500, 0.01)", - (), - ).await.unwrap(); - - let stats = StatsQuery::new(conn); - let tokens = stats.token_breakdown(None).await.unwrap(); - assert_eq!(tokens.len(), 1); - assert_eq!(tokens[0].input_tokens, 1000); - assert_eq!(tokens[0].output_tokens, 500); - } - - #[tokio::test] - async fn test_bottlenecks() { - let (_db, conn) = setup().await; - conn.execute( - "INSERT INTO runtime_state (task_id, duration_secs) VALUES ('fn-1-test.1', 3600)", - (), - ).await.unwrap(); - - let stats = StatsQuery::new(conn); - let bottlenecks = 
stats.bottlenecks(10).await.unwrap(); - assert!(!bottlenecks.is_empty()); - assert_eq!(bottlenecks[0].task_id, "fn-1-test.1"); - } - - #[tokio::test] - async fn test_dora_metrics() { - let (_db, conn) = setup().await; - let stats = StatsQuery::new(conn); - let dora = stats.dora_metrics().await.unwrap(); - assert_eq!(dora.throughput_per_week, 0.0); - assert_eq!(dora.change_failure_rate, 0.0); - } -} diff --git a/flowctl/crates/flowctl-db/src/migration.rs b/flowctl/crates/flowctl-db/src/migration.rs deleted file mode 100644 index 890afbc4..00000000 --- a/flowctl/crates/flowctl-db/src/migration.rs +++ /dev/null @@ -1,272 +0,0 @@ -//! Schema migration infrastructure for flowctl-db. -//! -//! Tracks schema version in a `_meta` table and runs numbered migrations -//! sequentially. Migrations are idempotent (safe to re-run). - -use libsql::Connection; - -use crate::error::DbError; - -/// Current target schema version. Bump this when adding new migrations. -const TARGET_VERSION: i64 = 5; - -/// Ensure `_meta` table exists and run any pending migrations. -pub async fn migrate(conn: &Connection) -> Result<(), DbError> { - // Create the _meta table if it doesn't exist. - conn.execute( - "CREATE TABLE IF NOT EXISTS _meta (key TEXT PRIMARY KEY, value TEXT)", - (), - ) - .await - .map_err(|e| DbError::Schema(format!("_meta table creation failed: {e}")))?; - - let current = get_version(conn).await?; - - if current < 2 { - migrate_v2(conn).await?; - } - - if current < 3 { - migrate_v3(conn).await?; - } - - if current < 4 { - migrate_v4(conn).await?; - } - - if current < 5 { - migrate_v5(conn).await?; - } - - // Update stored version to target. - if current < TARGET_VERSION { - set_version(conn, TARGET_VERSION).await?; - } - - Ok(()) -} - -/// Read current schema version from `_meta`. Returns 1 if no version is set -/// (meaning the DB has the original schema but no migration history). 
-async fn get_version(conn: &Connection) -> Result { - let mut rows = conn - .query( - "SELECT value FROM _meta WHERE key = 'schema_version'", - (), - ) - .await - .map_err(|e| DbError::Schema(format!("_meta query failed: {e}")))?; - - if let Some(row) = rows - .next() - .await - .map_err(|e| DbError::Schema(format!("_meta row read failed: {e}")))? - { - let val: String = row - .get(0) - .map_err(|e| DbError::Schema(format!("_meta value read failed: {e}")))?; - val.parse::() - .map_err(|e| DbError::Schema(format!("_meta version parse failed: {e}"))) - } else { - // No version stored yet — this is a v1 database (original schema). - Ok(1) - } -} - -/// Write schema version to `_meta`. -async fn set_version(conn: &Connection, version: i64) -> Result<(), DbError> { - conn.execute( - "INSERT INTO _meta (key, value) VALUES ('schema_version', ?1) \ - ON CONFLICT(key) DO UPDATE SET value = excluded.value", - libsql::params![version.to_string()], - ) - .await - .map_err(|e| DbError::Schema(format!("_meta version update failed: {e}")))?; - Ok(()) -} - -/// Migration v2: Add TTL columns to file_locks. -/// -/// - `holder_pid INTEGER` — PID of the process holding the lock -/// - `expires_at TEXT` — ISO-8601 expiry timestamp for TTL-based cleanup -/// -/// Uses `.ok()` on each ALTER TABLE because the column may already exist -/// on re-run (ALTER TABLE ADD COLUMN is not idempotent in SQLite/libSQL). -async fn migrate_v2(conn: &Connection) -> Result<(), DbError> { - let _ = conn - .execute( - "ALTER TABLE file_locks ADD COLUMN holder_pid INTEGER", - (), - ) - .await - .ok(); - - let _ = conn - .execute( - "ALTER TABLE file_locks ADD COLUMN expires_at TEXT", - (), - ) - .await - .ok(); - - Ok(()) -} - -/// Migration v3: Change file_locks PK to composite (file_path, task_id) -/// and add `lock_mode TEXT DEFAULT 'write'`. -/// -/// SQLite can't ALTER PRIMARY KEY, so we recreate the table. 
-async fn migrate_v3(conn: &Connection) -> Result<(), DbError> { - conn.execute( - "CREATE TABLE IF NOT EXISTS file_locks_new ( - file_path TEXT NOT NULL, - task_id TEXT NOT NULL, - locked_at TEXT NOT NULL, - holder_pid INTEGER, - expires_at TEXT, - lock_mode TEXT NOT NULL DEFAULT 'write', - PRIMARY KEY (file_path, task_id) - )", - (), - ) - .await - .map_err(|e| DbError::Schema(format!("file_locks_new creation failed: {e}")))?; - - // Copy existing data (add default lock_mode for old rows). - conn.execute( - "INSERT OR IGNORE INTO file_locks_new (file_path, task_id, locked_at, holder_pid, expires_at, lock_mode) - SELECT file_path, task_id, locked_at, holder_pid, expires_at, COALESCE(lock_mode, 'write') - FROM file_locks", - (), - ) - .await - .ok(); // May fail if file_locks doesn't have lock_mode column yet — that's fine. - - conn.execute("DROP TABLE IF EXISTS file_locks", ()) - .await - .map_err(|e| DbError::Schema(format!("file_locks drop failed: {e}")))?; - - conn.execute( - "ALTER TABLE file_locks_new RENAME TO file_locks", - (), - ) - .await - .map_err(|e| DbError::Schema(format!("file_locks rename failed: {e}")))?; - - Ok(()) -} - -/// Migration v4: Add scout_cache table for caching scout results. -async fn migrate_v4(conn: &Connection) -> Result<(), DbError> { - conn.execute( - "CREATE TABLE IF NOT EXISTS scout_cache ( - key TEXT PRIMARY KEY, - commit_hash TEXT NOT NULL, - scout_type TEXT NOT NULL, - result TEXT NOT NULL, - created_at TEXT NOT NULL DEFAULT (datetime('now')) - )", - (), - ) - .await - .map_err(|e| DbError::Schema(format!("scout_cache creation failed: {e}")))?; - - conn.execute( - "CREATE INDEX IF NOT EXISTS idx_scout_cache_type ON scout_cache(scout_type)", - (), - ) - .await - .ok(); - - conn.execute( - "CREATE INDEX IF NOT EXISTS idx_scout_cache_created ON scout_cache(created_at)", - (), - ) - .await - .ok(); - - Ok(()) -} - -/// Migration v5: Add event_store and pipeline_progress tables for event sourcing. 
-/// -/// These tables are created in `schema.sql` for fresh databases; this migration -/// adds them to databases created before v5. -async fn migrate_v5(conn: &Connection) -> Result<(), DbError> { - conn.execute( - "CREATE TABLE IF NOT EXISTS event_store ( - event_id INTEGER PRIMARY KEY AUTOINCREMENT, - stream_id TEXT NOT NULL, - version INTEGER NOT NULL, - event_type TEXT NOT NULL, - payload TEXT NOT NULL, - metadata TEXT, - created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')) - )", - (), - ) - .await - .map_err(|e| DbError::Schema(format!("event_store creation failed: {e}")))?; - - conn.execute( - "CREATE UNIQUE INDEX IF NOT EXISTS idx_event_store_stream_version - ON event_store(stream_id, version)", - (), - ) - .await - .ok(); - - conn.execute( - "CREATE TABLE IF NOT EXISTS pipeline_progress ( - epic_id TEXT PRIMARY KEY, - phase TEXT NOT NULL DEFAULT 'plan', - started_at TEXT, - updated_at TEXT - )", - (), - ) - .await - .map_err(|e| DbError::Schema(format!("pipeline_progress creation failed: {e}")))?; - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::pool; - - #[tokio::test] - async fn test_migrate_fresh_db() { - let (_db, conn) = pool::open_memory_async().await.unwrap(); - - // Verify _meta table exists and version is set. - let version = get_version(&conn).await.unwrap(); - assert_eq!(version, TARGET_VERSION, "version should be {TARGET_VERSION} after open"); - - // Verify file_locks has the new columns. 
- let mut rows = conn - .query("SELECT name FROM pragma_table_info('file_locks')", ()) - .await - .unwrap(); - - let mut cols: Vec = Vec::new(); - while let Some(row) = rows.next().await.unwrap() { - cols.push(row.get::(0).unwrap()); - } - - assert!(cols.contains(&"holder_pid".to_string()), "holder_pid missing: {cols:?}"); - assert!(cols.contains(&"expires_at".to_string()), "expires_at missing: {cols:?}"); - } - - #[tokio::test] - async fn test_migrate_idempotent() { - let (_db, conn) = pool::open_memory_async().await.unwrap(); - - // Run migrate again — should not error. - migrate(&conn).await.expect("second migrate should be idempotent"); - - let version = get_version(&conn).await.unwrap(); - assert_eq!(version, TARGET_VERSION); - } -} diff --git a/flowctl/crates/flowctl-db/src/phases.rs b/flowctl/crates/flowctl-db/src/phases.rs new file mode 100644 index 00000000..7efe4a80 --- /dev/null +++ b/flowctl/crates/flowctl-db/src/phases.rs @@ -0,0 +1,70 @@ +//! Phase progress store — delegates to `json_store::phase_*` / `phases_*`. + +use std::path::Path; + +use crate::error::DbError; + +/// Sync phase progress store backed by `.state/phases.json`. +pub struct PhaseStore<'a> { + flow_dir: &'a Path, +} + +impl<'a> PhaseStore<'a> { + pub fn new(flow_dir: &'a Path) -> Self { + Self { flow_dir } + } + + /// Mark a phase as completed for a task. + pub fn mark_done(&self, task_id: &str, phase: &str) -> Result<(), DbError> { + flowctl_core::json_store::phase_mark_done(self.flow_dir, task_id, phase)?; + Ok(()) + } + + /// Get all completed phases for a task. + pub fn get_completed(&self, task_id: &str) -> Result, DbError> { + let phases = flowctl_core::json_store::phases_completed(self.flow_dir, task_id)?; + Ok(phases) + } + + /// Reset all phase progress for a task. Returns the number cleared. 
+ pub fn reset(&self, task_id: &str) -> Result<(), DbError> { + flowctl_core::json_store::phases_reset(self.flow_dir, task_id)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn mark_done_and_get() { + let tmp = TempDir::new().unwrap(); + let store = PhaseStore::new(tmp.path()); + + store.mark_done("t1", "plan").unwrap(); + store.mark_done("t1", "implement").unwrap(); + + let phases = store.get_completed("t1").unwrap(); + assert_eq!(phases, vec!["plan", "implement"]); + + // Idempotent re-mark. + store.mark_done("t1", "plan").unwrap(); + assert_eq!(store.get_completed("t1").unwrap().len(), 2); + } + + #[test] + fn reset_clears_phases() { + let tmp = TempDir::new().unwrap(); + let store = PhaseStore::new(tmp.path()); + + store.mark_done("t1", "1").unwrap(); + store.mark_done("t1", "5").unwrap(); + store.mark_done("t2", "1").unwrap(); + + store.reset("t1").unwrap(); + assert!(store.get_completed("t1").unwrap().is_empty()); + assert_eq!(store.get_completed("t2").unwrap(), vec!["1"]); + } +} diff --git a/flowctl/crates/flowctl-db/src/pipeline.rs b/flowctl/crates/flowctl-db/src/pipeline.rs new file mode 100644 index 00000000..919a41bd --- /dev/null +++ b/flowctl/crates/flowctl-db/src/pipeline.rs @@ -0,0 +1,52 @@ +//! Pipeline progress store — delegates to `json_store::pipeline_*`. + +use std::path::Path; + +use crate::error::DbError; + +/// Sync pipeline store backed by `.state/pipeline.json`. +pub struct PipelineStore<'a> { + flow_dir: &'a Path, +} + +impl<'a> PipelineStore<'a> { + pub fn new(flow_dir: &'a Path) -> Self { + Self { flow_dir } + } + + /// Read the current pipeline phase for an epic. + pub fn read(&self, epic_id: &str) -> Result, DbError> { + let phase = flowctl_core::json_store::pipeline_read(self.flow_dir, epic_id)?; + Ok(phase) + } + + /// Set the pipeline phase for an epic. 
+ pub fn write(&self, epic_id: &str, phase: &str) -> Result<(), DbError> { + flowctl_core::json_store::pipeline_write(self.flow_dir, epic_id, phase)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn pipeline_read_write() { + let tmp = TempDir::new().unwrap(); + let store = PipelineStore::new(tmp.path()); + + assert_eq!(store.read("fn-1").unwrap(), None); + + store.write("fn-1", "plan").unwrap(); + assert_eq!(store.read("fn-1").unwrap().as_deref(), Some("plan")); + + store.write("fn-1", "work").unwrap(); + assert_eq!(store.read("fn-1").unwrap().as_deref(), Some("work")); + + store.write("fn-2", "plan").unwrap(); + assert_eq!(store.read("fn-2").unwrap().as_deref(), Some("plan")); + assert_eq!(store.read("fn-1").unwrap().as_deref(), Some("work")); + } +} diff --git a/flowctl/crates/flowctl-db/src/pool.rs b/flowctl/crates/flowctl-db/src/pool.rs deleted file mode 100644 index aa856e51..00000000 --- a/flowctl/crates/flowctl-db/src/pool.rs +++ /dev/null @@ -1,326 +0,0 @@ -//! Async libSQL connection setup and schema application. -//! -//! # Architecture -//! -//! - **libSQL** is fully async, Tokio-based. All DB calls are `.await`. -//! - Schema is applied once on open via `apply_schema()` — a single SQL -//! blob (see `schema.sql`). Migrations run after schema init (see `migration.rs`). -//! - `libsql::Connection` is cheap and `Clone`. Pass by value; do not wrap -//! in `Arc>`. -//! - PRAGMAs (WAL, busy_timeout, foreign_keys) are set per-connection on -//! each `open_async()` call. -//! -//! # In-memory databases -//! -//! libSQL `:memory:` databases are **connection-scoped**: schema applied on -//! one connection is not visible to another from the same `Database`. -//! `open_memory_async()` returns both the `Database` AND the `Connection` -//! with schema applied — callers must use that connection directly. 
- -use std::path::{Path, PathBuf}; -use std::process::Command; - -use libsql::{Builder, Connection, Database}; - -use crate::error::DbError; -use crate::migration; - -/// Embedded schema applied to fresh databases. -const SCHEMA_SQL: &str = include_str!("schema.sql"); - -/// Resolve the state directory for the flowctl database. -/// -/// Uses `git rev-parse --git-common-dir` so worktrees share a single DB. -/// Falls back to `.flow/.state/` if not in a git repo. -pub fn resolve_state_dir(working_dir: &Path) -> Result { - let git_result = Command::new("git") - .args(["rev-parse", "--git-common-dir"]) - .current_dir(working_dir) - .output(); - - match git_result { - Ok(output) if output.status.success() => { - let git_common = String::from_utf8_lossy(&output.stdout).trim().to_string(); - let git_common_path = if Path::new(&git_common).is_absolute() { - PathBuf::from(git_common) - } else { - working_dir.join(git_common) - }; - Ok(git_common_path.join("flow-state")) - } - _ => Ok(working_dir.join(".flow").join(".state")), - } -} - -/// Resolve the full libSQL database file path. -pub fn resolve_libsql_path(working_dir: &Path) -> Result { - let state_dir = resolve_state_dir(working_dir)?; - Ok(state_dir.join("flowctl.db")) -} - -/// Apply production PRAGMAs to a libSQL connection. -/// -/// Some PRAGMAs (journal_mode, synchronous) return a row reporting the -/// resulting value, so we must use `query()` rather than `execute()`. -async fn apply_pragmas(conn: &Connection) -> Result<(), DbError> { - for pragma in [ - "PRAGMA journal_mode = WAL", - "PRAGMA busy_timeout = 5000", - "PRAGMA synchronous = NORMAL", - "PRAGMA foreign_keys = ON", - "PRAGMA wal_autocheckpoint = 1000", - ] { - // query() handles both row-returning and no-row PRAGMAs. - let mut rows = conn - .query(pragma, ()) - .await - .map_err(|e| DbError::Schema(format!("pragma {pragma}: {e}")))?; - // Drain any result rows. 
- while let Some(_row) = rows - .next() - .await - .map_err(|e| DbError::Schema(format!("pragma {pragma} drain: {e}")))? - {} - } - Ok(()) -} - -/// Apply the full libSQL schema to a fresh database. -async fn apply_schema(conn: &Connection) -> Result<(), DbError> { - conn.execute_batch(SCHEMA_SQL) - .await - .map_err(|e| DbError::Schema(format!("schema apply failed: {e}")))?; - - // Backfill reverse deps from any pre-existing task_deps rows. - conn.execute( - "INSERT OR IGNORE INTO task_reverse_deps (depends_on, task_id) SELECT depends_on, task_id FROM task_deps", - (), - ) - .await - .map_err(|e| DbError::Schema(format!("reverse deps backfill failed: {e}")))?; - - // Try to create vector indexes (requires libSQL server extensions). - // Gracefully degrade if not available (embedded/core mode). - let _ = conn - .execute( - "CREATE INDEX IF NOT EXISTS memory_emb_idx ON memory(libsql_vector_idx(embedding))", - (), - ) - .await; - - let _ = conn - .execute( - "CREATE INDEX IF NOT EXISTS skills_emb_idx ON skills(libsql_vector_idx(embedding))", - (), - ) - .await; - - Ok(()) -} - -/// Open a file-backed libSQL database with schema applied. -pub async fn open_async(working_dir: &Path) -> Result { - let db_path = resolve_libsql_path(working_dir)?; - - if let Some(parent) = db_path.parent() { - std::fs::create_dir_all(parent).map_err(|e| { - DbError::StateDir(format!("failed to create {}: {e}", parent.display())) - })?; - } - - let db = Builder::new_local(&db_path) - .build() - .await - .map_err(|e| DbError::Schema(format!("libsql open: {e}")))?; - - let conn = db.connect()?; - apply_pragmas(&conn).await?; - apply_schema(&conn).await?; - migration::migrate(&conn).await?; - - Ok(db) -} - -/// Alias for `resolve_libsql_path` (naming parity with old flowctl-db). -pub fn resolve_db_path(working_dir: &Path) -> Result { - resolve_libsql_path(working_dir) -} - -/// Delete old events and rollups to keep the DB small. -/// Returns the number of rows removed. 
-pub async fn cleanup(conn: &Connection) -> Result { - let events_deleted = conn - .execute( - "DELETE FROM events WHERE timestamp < strftime('%Y-%m-%dT%H:%M:%fZ', 'now', '-90 days')", - (), - ) - .await?; - - let rollups_deleted = conn - .execute( - "DELETE FROM daily_rollup WHERE day < strftime('%Y-%m-%d', 'now', '-365 days')", - (), - ) - .await?; - - Ok(events_deleted + rollups_deleted) -} - -/// Open an in-memory libSQL database for testing. -/// -/// Returns both the `Database` handle and a `Connection` with schema -/// applied. The connection must be kept alive to access the in-memory -/// database (libsql `:memory:` DBs are connection-scoped). -pub async fn open_memory_async() -> Result<(Database, Connection), DbError> { - let db = Builder::new_local(":memory:") - .build() - .await - .map_err(|e| DbError::Schema(format!("libsql open_memory: {e}")))?; - - let conn = db.connect()?; - apply_pragmas(&conn).await.ok(); - apply_schema(&conn).await?; - migration::migrate(&conn).await?; - - Ok((db, conn)) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn test_open_memory_async() { - let (_db, conn) = open_memory_async() - .await - .expect("should open in-memory libsql db"); - - let mut rows = conn - .query( - "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name", - (), - ) - .await - .unwrap(); - - let mut tables: Vec = Vec::new(); - while let Some(row) = rows.next().await.unwrap() { - tables.push(row.get::(0).unwrap()); - } - - for expected in [ - "epics", - "tasks", - "task_deps", - "task_reverse_deps", - "epic_deps", - "file_ownership", - "runtime_state", - "file_locks", - "heartbeats", - "phase_progress", - "evidence", - "events", - "token_usage", - "daily_rollup", - "monthly_rollup", - "memory", - "skills", - "event_store", - "pipeline_progress", - "_meta", - ] { - assert!( - tables.contains(&expected.to_string()), - "{expected} table missing; tables={tables:?}" - ); - } - } - - #[tokio::test] - async fn 
test_insert_and_query_async() { - let (_db, conn) = open_memory_async().await.unwrap(); - - conn.execute( - "INSERT INTO epics (id, title, status, file_path, created_at, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6)", - libsql::params![ - "fn-test-1", - "Test Epic", - "open", - "epics/fn-test-1.md", - "2026-04-05T00:00:00Z", - "2026-04-05T00:00:00Z" - ], - ) - .await - .unwrap(); - - let mut rows = conn - .query( - "SELECT title FROM epics WHERE id = ?1", - libsql::params!["fn-test-1"], - ) - .await - .unwrap(); - let row = rows.next().await.unwrap().expect("row exists"); - let title: String = row.get(0).unwrap(); - assert_eq!(title, "Test Epic"); - } - - #[tokio::test] - async fn test_memory_has_embedding_column() { - let (_db, conn) = open_memory_async().await.unwrap(); - - let mut rows = conn - .query("SELECT name FROM pragma_table_info('memory')", ()) - .await - .unwrap(); - - let mut cols: Vec = Vec::new(); - while let Some(row) = rows.next().await.unwrap() { - cols.push(row.get::(0).unwrap()); - } - - assert!( - cols.contains(&"embedding".to_string()), - "embedding column missing: {cols:?}" - ); - } - - #[tokio::test] - async fn test_event_trigger_fires() { - let (_db, conn) = open_memory_async().await.unwrap(); - - conn.execute( - "INSERT INTO epics (id, title, status, file_path, created_at, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6)", - libsql::params![ - "fn-trg", - "Trigger Test", - "open", - "epics/fn-trg.md", - "2026-04-05T00:00:00Z", - "2026-04-05T00:00:00Z" - ], - ) - .await - .unwrap(); - - conn.execute( - "INSERT INTO events (epic_id, task_id, event_type, actor) VALUES (?1, ?2, ?3, ?4)", - libsql::params!["fn-trg", "fn-trg.1", "task_completed", "worker"], - ) - .await - .unwrap(); - - let mut rows = conn - .query( - "SELECT tasks_completed FROM daily_rollup WHERE epic_id = ?1", - libsql::params!["fn-trg"], - ) - .await - .unwrap(); - let row = rows.next().await.unwrap().expect("rollup row exists"); - let completed: i64 = row.get(0).unwrap(); - 
assert_eq!(completed, 1); - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/deps.rs b/flowctl/crates/flowctl-db/src/repo/deps.rs deleted file mode 100644 index 675378f5..00000000 --- a/flowctl/crates/flowctl-db/src/repo/deps.rs +++ /dev/null @@ -1,124 +0,0 @@ -//! Async repository for task and epic dependency edges. - -use libsql::{params, Connection}; - -use crate::error::DbError; - -/// Async repository for task and epic dependency edges. -pub struct DepRepo { - conn: Connection, -} - -impl DepRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - pub async fn add_task_dep(&self, task_id: &str, depends_on: &str) -> Result<(), DbError> { - self.conn - .execute( - "INSERT OR IGNORE INTO task_deps (task_id, depends_on) VALUES (?1, ?2)", - params![task_id.to_string(), depends_on.to_string()], - ) - .await?; - Ok(()) - } - - pub async fn remove_task_dep(&self, task_id: &str, depends_on: &str) -> Result<(), DbError> { - self.conn - .execute( - "DELETE FROM task_deps WHERE task_id = ?1 AND depends_on = ?2", - params![task_id.to_string(), depends_on.to_string()], - ) - .await?; - Ok(()) - } - - pub async fn list_task_deps(&self, task_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT depends_on FROM task_deps WHERE task_id = ?1 ORDER BY depends_on", - params![task_id.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row.get::(0)?); - } - Ok(out) - } - - /// Direct dependents of a task (one level) via the reverse index. O(1) lookup. - pub async fn list_dependents(&self, task_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT task_id FROM task_reverse_deps WHERE depends_on = ?1 ORDER BY task_id", - params![task_id.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? 
{ - out.push(row.get::(0)?); - } - Ok(out) - } - - /// All transitive dependents of a task (recursive BFS) via the reverse index. - pub async fn list_all_dependents(&self, task_id: &str) -> Result, DbError> { - let mut result = Vec::new(); - let mut visited = std::collections::HashSet::new(); - let mut queue = std::collections::VecDeque::new(); - queue.push_back(task_id.to_string()); - visited.insert(task_id.to_string()); - - while let Some(current) = queue.pop_front() { - let direct = self.list_dependents(¤t).await?; - for dep in direct { - if visited.insert(dep.clone()) { - result.push(dep.clone()); - queue.push_back(dep); - } - } - } - - result.sort(); - Ok(result) - } - - pub async fn add_epic_dep(&self, epic_id: &str, depends_on: &str) -> Result<(), DbError> { - self.conn - .execute( - "INSERT OR IGNORE INTO epic_deps (epic_id, depends_on) VALUES (?1, ?2)", - params![epic_id.to_string(), depends_on.to_string()], - ) - .await?; - Ok(()) - } - - pub async fn remove_epic_dep(&self, epic_id: &str, depends_on: &str) -> Result<(), DbError> { - self.conn - .execute( - "DELETE FROM epic_deps WHERE epic_id = ?1 AND depends_on = ?2", - params![epic_id.to_string(), depends_on.to_string()], - ) - .await?; - Ok(()) - } - - pub async fn list_epic_deps(&self, epic_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT depends_on FROM epic_deps WHERE epic_id = ?1 ORDER BY depends_on", - params![epic_id.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row.get::(0)?); - } - Ok(out) - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/epic.rs b/flowctl/crates/flowctl-db/src/repo/epic.rs deleted file mode 100644 index 4f00c940..00000000 --- a/flowctl/crates/flowctl-db/src/repo/epic.rs +++ /dev/null @@ -1,214 +0,0 @@ -//! Async repository for epic CRUD operations. 
- -use chrono::Utc; -use libsql::{params, Connection}; - -use flowctl_core::types::{Epic, EpicStatus}; - -use crate::error::DbError; - -use super::helpers::{parse_datetime, parse_epic_status, parse_review_status}; -use flowctl_core::types::ReviewStatus; - -/// Async repository for epic CRUD operations. -pub struct EpicRepo { - conn: Connection, -} - -impl EpicRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Insert or replace an epic (empty body preserves existing body). - pub async fn upsert(&self, epic: &Epic) -> Result<(), DbError> { - self.upsert_with_body(epic, "").await - } - - /// Insert or replace an epic with its markdown body. - pub async fn upsert_with_body(&self, epic: &Epic, body: &str) -> Result<(), DbError> { - self.conn - .execute( - "INSERT INTO epics (id, title, status, branch_name, plan_review, auto_execute_pending, auto_execute_set_at, archived, file_path, body, created_at, updated_at) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12) - ON CONFLICT(id) DO UPDATE SET - title = excluded.title, - status = excluded.status, - branch_name = excluded.branch_name, - plan_review = excluded.plan_review, - auto_execute_pending = excluded.auto_execute_pending, - auto_execute_set_at = excluded.auto_execute_set_at, - archived = excluded.archived, - file_path = excluded.file_path, - body = CASE WHEN excluded.body = '' THEN epics.body ELSE excluded.body END, - updated_at = excluded.updated_at", - params![ - epic.id.clone(), - epic.title.clone(), - epic.status.to_string(), - epic.branch_name.clone(), - epic.plan_review.to_string(), - epic.auto_execute_pending.unwrap_or(false) as i64, - epic.auto_execute_set_at.clone(), - epic.archived as i64, - epic.file_path.clone().unwrap_or_default(), - body.to_string(), - epic.created_at.to_rfc3339(), - epic.updated_at.to_rfc3339(), - ], - ) - .await?; - - // Upsert epic dependencies. 
- self.conn - .execute( - "DELETE FROM epic_deps WHERE epic_id = ?1", - params![epic.id.clone()], - ) - .await?; - for dep in &epic.depends_on_epics { - self.conn - .execute( - "INSERT INTO epic_deps (epic_id, depends_on) VALUES (?1, ?2)", - params![epic.id.clone(), dep.clone()], - ) - .await?; - } - - Ok(()) - } - - /// Get an epic by ID. - pub async fn get(&self, id: &str) -> Result { - self.get_with_body(id).await.map(|(epic, _)| epic) - } - - /// Get an epic by ID, returning (Epic, body). - pub async fn get_with_body(&self, id: &str) -> Result<(Epic, String), DbError> { - let mut rows = self - .conn - .query( - "SELECT id, title, status, branch_name, plan_review, file_path, created_at, updated_at, COALESCE(body, ''), auto_execute_pending, auto_execute_set_at, archived - FROM epics WHERE id = ?1", - params![id.to_string()], - ) - .await?; - - let row = rows - .next() - .await? - .ok_or_else(|| DbError::NotFound(format!("epic: {id}")))?; - - let status_s: String = row.get(2)?; - let plan_s: String = row.get(4)?; - let created_s: String = row.get(6)?; - let updated_s: String = row.get(7)?; - let auto_exec_pending: i64 = row.get::(9).unwrap_or(0); - let auto_exec_set_at: Option = row.get::>(10).unwrap_or(None); - let archived_val: i64 = row.get::(11).unwrap_or(0); - - let epic = Epic { - schema_version: 1, - id: row.get::(0)?, - title: row.get::(1)?, - status: parse_epic_status(&status_s), - branch_name: row.get::>(3)?, - plan_review: parse_review_status(&plan_s), - completion_review: ReviewStatus::Unknown, - depends_on_epics: Vec::new(), - default_impl: None, - default_review: None, - default_sync: None, - auto_execute_pending: if auto_exec_pending != 0 { Some(true) } else { None }, - auto_execute_set_at: auto_exec_set_at, - archived: archived_val != 0, - file_path: row.get::>(5)?, - created_at: parse_datetime(&created_s), - updated_at: parse_datetime(&updated_s), - }; - let body: String = row.get::(8)?; - - let deps = self.get_deps(&epic.id).await?; - Ok(( - Epic 
{ - depends_on_epics: deps, - ..epic - }, - body, - )) - } - - /// List all epics, optionally filtered by status. - pub async fn list(&self, status: Option<&str>) -> Result, DbError> { - let mut rows = match status { - Some(s) => { - self.conn - .query( - "SELECT id FROM epics WHERE status = ?1 ORDER BY created_at", - params![s.to_string()], - ) - .await? - } - None => { - self.conn - .query("SELECT id FROM epics ORDER BY created_at", ()) - .await? - } - }; - - let mut ids: Vec = Vec::new(); - while let Some(row) = rows.next().await? { - ids.push(row.get::(0)?); - } - - let mut out = Vec::with_capacity(ids.len()); - for id in &ids { - out.push(self.get(id).await?); - } - Ok(out) - } - - /// Update epic status. - pub async fn update_status(&self, id: &str, status: EpicStatus) -> Result<(), DbError> { - let rows = self - .conn - .execute( - "UPDATE epics SET status = ?1, updated_at = ?2 WHERE id = ?3", - params![status.to_string(), Utc::now().to_rfc3339(), id.to_string()], - ) - .await?; - if rows == 0 { - return Err(DbError::NotFound(format!("epic: {id}"))); - } - Ok(()) - } - - /// Delete an epic and its dep rows. - pub async fn delete(&self, id: &str) -> Result<(), DbError> { - self.conn - .execute( - "DELETE FROM epic_deps WHERE epic_id = ?1", - params![id.to_string()], - ) - .await?; - self.conn - .execute("DELETE FROM epics WHERE id = ?1", params![id.to_string()]) - .await?; - Ok(()) - } - - async fn get_deps(&self, epic_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT depends_on FROM epic_deps WHERE epic_id = ?1", - params![epic_id.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row.get::(0)?); - } - Ok(out) - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/event.rs b/flowctl/crates/flowctl-db/src/repo/event.rs deleted file mode 100644 index ac96ab18..00000000 --- a/flowctl/crates/flowctl-db/src/repo/event.rs +++ /dev/null @@ -1,118 +0,0 @@ -//! 
Async repository for the append-only event log. - -use libsql::{params, Connection}; - -use crate::error::DbError; - -/// A row from the events table. -#[derive(Debug, Clone, serde::Serialize)] -pub struct EventRow { - pub id: i64, - pub timestamp: String, - pub epic_id: String, - pub task_id: Option, - pub event_type: String, - pub actor: Option, - pub payload: Option, - pub session_id: Option, -} - -/// Async repository for the append-only event log. -pub struct EventRepo { - conn: Connection, -} - -impl EventRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Record an event. Returns the inserted rowid. - pub async fn insert( - &self, - epic_id: &str, - task_id: Option<&str>, - event_type: &str, - actor: Option<&str>, - payload: Option<&str>, - session_id: Option<&str>, - ) -> Result { - self.conn - .execute( - "INSERT INTO events (epic_id, task_id, event_type, actor, payload, session_id) - VALUES (?1, ?2, ?3, ?4, ?5, ?6)", - params![ - epic_id.to_string(), - task_id.map(std::string::ToString::to_string), - event_type.to_string(), - actor.map(std::string::ToString::to_string), - payload.map(std::string::ToString::to_string), - session_id.map(std::string::ToString::to_string), - ], - ) - .await?; - Ok(self.conn.last_insert_rowid()) - } - - /// List recent events for an epic (most recent first). - pub async fn list_by_epic( - &self, - epic_id: &str, - limit: usize, - ) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT id, timestamp, epic_id, task_id, event_type, actor, payload, session_id - FROM events WHERE epic_id = ?1 ORDER BY id DESC LIMIT ?2", - params![epic_id.to_string(), limit as i64], - ) - .await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? 
{ - out.push(EventRow { - id: row.get::(0)?, - timestamp: row.get::(1)?, - epic_id: row.get::(2)?, - task_id: row.get::>(3)?, - event_type: row.get::(4)?, - actor: row.get::>(5)?, - payload: row.get::>(6)?, - session_id: row.get::>(7)?, - }); - } - Ok(out) - } - - /// List recent events of a given type across all epics. - pub async fn list_by_type( - &self, - event_type: &str, - limit: usize, - ) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT id, timestamp, epic_id, task_id, event_type, actor, payload, session_id - FROM events WHERE event_type = ?1 ORDER BY id DESC LIMIT ?2", - params![event_type.to_string(), limit as i64], - ) - .await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(EventRow { - id: row.get::(0)?, - timestamp: row.get::(1)?, - epic_id: row.get::(2)?, - task_id: row.get::>(3)?, - event_type: row.get::(4)?, - actor: row.get::>(5)?, - payload: row.get::>(6)?, - session_id: row.get::>(7)?, - }); - } - Ok(out) - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/event_store.rs b/flowctl/crates/flowctl-db/src/repo/event_store.rs deleted file mode 100644 index cfbdc415..00000000 --- a/flowctl/crates/flowctl-db/src/repo/event_store.rs +++ /dev/null @@ -1,343 +0,0 @@ -//! Async repository for the event-sourced event store. -//! -//! Distinct from [`EventRepo`](super::EventRepo) (the audit log). This repo -//! implements append-only, version-ordered streams with optimistic concurrency -//! via a unique `(stream_id, version)` constraint. - -use libsql::{params, Connection}; - -use crate::error::DbError; -use flowctl_core::events::{EventMetadata, FlowEvent}; - -/// A persisted event read back from the event store. 
-#[derive(Debug, Clone, serde::Serialize)] -pub struct StoredEvent { - pub event_id: i64, - pub stream_id: String, - pub version: i64, - pub event_type: String, - pub payload: FlowEvent, - pub metadata: Option, - pub created_at: String, -} - -/// Async repository for event-sourced streams. -pub struct EventStoreRepo { - conn: Connection, -} - -impl EventStoreRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Append an event to a stream. Auto-increments the version via - /// `SELECT MAX(version)+1`. Returns the assigned version number. - /// - /// Uses `INSERT OR FAIL` so a concurrent append that races on the same - /// version will fail with a constraint error rather than silently - /// overwriting. - pub async fn append( - &self, - stream_id: &str, - event: &FlowEvent, - metadata: &EventMetadata, - ) -> Result { - // Determine the next version for this stream. - let mut rows = self - .conn - .query( - "SELECT COALESCE(MAX(version), 0) FROM event_store WHERE stream_id = ?1", - params![stream_id.to_string()], - ) - .await?; - let next_version: i64 = match rows.next().await? { - Some(row) => row.get::(0)? 
+ 1, - None => 1, - }; - - let event_type = event_type_label(event); - let payload_json = serde_json::to_string(event)?; - let metadata_json = serde_json::to_string(metadata)?; - - let result = self - .conn - .execute( - "INSERT OR FAIL INTO event_store (stream_id, version, event_type, payload, metadata) - VALUES (?1, ?2, ?3, ?4, ?5)", - params![ - stream_id.to_string(), - next_version, - event_type, - payload_json, - metadata_json - ], - ) - .await; - - match result { - Ok(_) => Ok(next_version as u64), - Err(e) => { - let msg = e.to_string(); - if msg.contains("UNIQUE constraint failed") || msg.contains("constraint") { - Err(DbError::Constraint(format!( - "concurrency conflict: version {next_version} already exists for stream {stream_id}" - ))) - } else { - Err(DbError::LibSql(e)) - } - } - } - } - - /// Query all events for a stream, in version order. - pub async fn query_stream(&self, stream_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT event_id, stream_id, version, event_type, payload, metadata, created_at - FROM event_store WHERE stream_id = ?1 ORDER BY version ASC", - params![stream_id.to_string()], - ) - .await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(parse_stored_event(&row)?); - } - Ok(out) - } - - /// Query events globally by event type, in creation order. - pub async fn query_by_type(&self, event_type: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT event_id, stream_id, version, event_type, payload, metadata, created_at - FROM event_store WHERE event_type = ?1 ORDER BY event_id ASC", - params![event_type.to_string()], - ) - .await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(parse_stored_event(&row)?); - } - Ok(out) - } - - /// Replay all events for a stream (same as `query_stream`, named for intent). 
- pub async fn rebuild_stream(&self, stream_id: &str) -> Result, DbError> { - self.query_stream(stream_id).await - } - - /// Query all events whose stream_id matches any of the given prefixes. - /// Useful for fetching all events related to an epic (epic stream + task streams). - pub async fn query_by_stream_prefixes(&self, prefixes: &[String]) -> Result, DbError> { - if prefixes.is_empty() { - return Ok(Vec::new()); - } - // Build WHERE clause: stream_id LIKE 'prefix1%' OR stream_id LIKE 'prefix2%' ... - let conditions: Vec = prefixes.iter().enumerate() - .map(|(i, _)| format!("stream_id LIKE ?{}", i + 1)) - .collect(); - let sql = format!( - "SELECT event_id, stream_id, version, event_type, payload, metadata, created_at - FROM event_store WHERE {} ORDER BY event_id ASC", - conditions.join(" OR ") - ); - - let like_params: Vec = prefixes.iter().map(|p| format!("{p}%")).collect(); - // Use positional params via libsql::params_from_iter - let values: Vec = like_params.into_iter().map(libsql::Value::from).collect(); - - let mut rows = self.conn.query(&sql, values).await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(parse_stored_event(&row)?); - } - Ok(out) - } -} - -/// Extract a human-readable event type label from a `FlowEvent`. -fn event_type_label(event: &FlowEvent) -> String { - match event { - FlowEvent::Epic(e) => format!("epic:{}", serde_json::to_value(e).unwrap_or_default().as_str().unwrap_or("unknown")), - FlowEvent::Task(t) => format!("task:{}", serde_json::to_value(t).unwrap_or_default().as_str().unwrap_or("unknown")), - } -} - -/// Parse a row from the event_store table into a `StoredEvent`. 
-fn parse_stored_event(row: &libsql::Row) -> Result { - let payload_str: String = row.get::(4)?; - let metadata_str: Option = row.get::>(5)?; - - Ok(StoredEvent { - event_id: row.get::(0)?, - stream_id: row.get::(1)?, - version: row.get::(2)?, - event_type: row.get::(3)?, - payload: serde_json::from_str(&payload_str)?, - metadata: match metadata_str { - Some(s) if !s.is_empty() => Some(serde_json::from_str(&s)?), - _ => None, - }, - created_at: row.get::(6)?, - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::pool::open_memory_async; - use flowctl_core::events::{EpicEvent, TaskEvent}; - - fn test_metadata() -> EventMetadata { - EventMetadata { - actor: "test".into(), - source_cmd: "test".into(), - session_id: "sess-1".into(), - timestamp: None, - } - } - - #[tokio::test] - async fn append_auto_increments_version() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EventStoreRepo::new(conn); - - let v1 = repo - .append("epic:fn-1", &FlowEvent::Epic(EpicEvent::Created), &test_metadata()) - .await - .unwrap(); - assert_eq!(v1, 1); - - let v2 = repo - .append("epic:fn-1", &FlowEvent::Epic(EpicEvent::PlanWritten), &test_metadata()) - .await - .unwrap(); - assert_eq!(v2, 2); - - // Different stream starts at 1. 
- let v1b = repo - .append("task:fn-1.1", &FlowEvent::Task(TaskEvent::Created), &test_metadata()) - .await - .unwrap(); - assert_eq!(v1b, 1); - } - - #[tokio::test] - async fn query_stream_returns_version_order() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EventStoreRepo::new(conn); - - repo.append("epic:fn-1", &FlowEvent::Epic(EpicEvent::Created), &test_metadata()).await.unwrap(); - repo.append("epic:fn-1", &FlowEvent::Epic(EpicEvent::PlanWritten), &test_metadata()).await.unwrap(); - repo.append("epic:fn-1", &FlowEvent::Epic(EpicEvent::Closed), &test_metadata()).await.unwrap(); - - let events = repo.query_stream("epic:fn-1").await.unwrap(); - assert_eq!(events.len(), 3); - assert_eq!(events[0].version, 1); - assert_eq!(events[1].version, 2); - assert_eq!(events[2].version, 3); - assert_eq!(events[0].payload, FlowEvent::Epic(EpicEvent::Created)); - assert_eq!(events[2].payload, FlowEvent::Epic(EpicEvent::Closed)); - } - - #[tokio::test] - async fn query_by_type_across_streams() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EventStoreRepo::new(conn); - - repo.append("epic:fn-1", &FlowEvent::Epic(EpicEvent::Created), &test_metadata()).await.unwrap(); - repo.append("epic:fn-2", &FlowEvent::Epic(EpicEvent::Created), &test_metadata()).await.unwrap(); - repo.append("epic:fn-1", &FlowEvent::Epic(EpicEvent::Closed), &test_metadata()).await.unwrap(); - - let created = repo.query_by_type("epic:created").await.unwrap(); - assert_eq!(created.len(), 2); - assert_eq!(created[0].stream_id, "epic:fn-1"); - assert_eq!(created[1].stream_id, "epic:fn-2"); - } - - #[tokio::test] - async fn rebuild_stream_replays_all_events() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EventStoreRepo::new(conn); - - repo.append("task:fn-1.1", &FlowEvent::Task(TaskEvent::Created), &test_metadata()).await.unwrap(); - repo.append("task:fn-1.1", &FlowEvent::Task(TaskEvent::Started), &test_metadata()).await.unwrap(); - 
repo.append("task:fn-1.1", &FlowEvent::Task(TaskEvent::Completed), &test_metadata()).await.unwrap(); - - let events = repo.rebuild_stream("task:fn-1.1").await.unwrap(); - assert_eq!(events.len(), 3); - assert_eq!(events[0].payload, FlowEvent::Task(TaskEvent::Created)); - assert_eq!(events[1].payload, FlowEvent::Task(TaskEvent::Started)); - assert_eq!(events[2].payload, FlowEvent::Task(TaskEvent::Completed)); - } - - #[tokio::test] - async fn optimistic_concurrency_conflict() { - let (_db, conn) = open_memory_async().await.unwrap(); - - // Directly insert two rows with the same (stream_id, version) to verify - // the unique constraint fires correctly. - conn.execute( - "INSERT INTO event_store (stream_id, version, event_type, payload, metadata) - VALUES ('epic:fn-1', 1, 'epic:created', '{}', '{}')", - (), - ) - .await - .unwrap(); - - // Second insert with the same stream_id + version should fail. - let result = conn - .execute( - "INSERT OR FAIL INTO event_store (stream_id, version, event_type, payload, metadata) - VALUES ('epic:fn-1', 1, 'epic:plan_written', '{}', '{}')", - (), - ) - .await; - - assert!(result.is_err(), "expected UNIQUE constraint failure"); - let err_msg = result.unwrap_err().to_string(); - assert!( - err_msg.contains("UNIQUE") || err_msg.contains("constraint"), - "expected constraint error, got: {err_msg}" - ); - } - - #[tokio::test] - async fn metadata_round_trips() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EventStoreRepo::new(conn); - - let meta = EventMetadata { - actor: "worker-1".into(), - source_cmd: "flowctl done".into(), - session_id: "sess-xyz".into(), - timestamp: Some("2026-04-07T12:00:00Z".into()), - }; - - repo.append("epic:fn-1", &FlowEvent::Epic(EpicEvent::Created), &meta).await.unwrap(); - let events = repo.query_stream("epic:fn-1").await.unwrap(); - assert_eq!(events.len(), 1); - - let got_meta = events[0].metadata.as_ref().expect("metadata should exist"); - assert_eq!(got_meta.actor, "worker-1"); - 
assert_eq!(got_meta.source_cmd, "flowctl done"); - assert_eq!(got_meta.session_id, "sess-xyz"); - } - - #[tokio::test] - async fn empty_stream_returns_empty() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EventStoreRepo::new(conn); - - let events = repo.query_stream("nonexistent").await.unwrap(); - assert!(events.is_empty()); - - let rebuilt = repo.rebuild_stream("nonexistent").await.unwrap(); - assert!(rebuilt.is_empty()); - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/evidence.rs b/flowctl/crates/flowctl-db/src/repo/evidence.rs deleted file mode 100644 index d416f38e..00000000 --- a/flowctl/crates/flowctl-db/src/repo/evidence.rs +++ /dev/null @@ -1,99 +0,0 @@ -//! Async repository for task completion evidence. - -use libsql::{params, Connection}; - -use flowctl_core::types::Evidence; - -use crate::error::DbError; - -/// Async repository for task completion evidence. -pub struct EvidenceRepo { - conn: Connection, -} - -impl EvidenceRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Upsert evidence for a task. Commits and tests are stored as JSON arrays. - pub async fn upsert(&self, task_id: &str, evidence: &Evidence) -> Result<(), DbError> { - let commits_json = if evidence.commits.is_empty() { - None - } else { - Some(serde_json::to_string(&evidence.commits)?) - }; - let tests_json = if evidence.tests.is_empty() { - None - } else { - Some(serde_json::to_string(&evidence.tests)?) 
- }; - - self.conn - .execute( - "INSERT INTO evidence (task_id, commits, tests, files_changed, insertions, deletions, review_iters) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7) - ON CONFLICT(task_id) DO UPDATE SET - commits = excluded.commits, - tests = excluded.tests, - files_changed = excluded.files_changed, - insertions = excluded.insertions, - deletions = excluded.deletions, - review_iters = excluded.review_iters", - params![ - task_id.to_string(), - commits_json, - tests_json, - evidence.files_changed.map(|v| v as i64), - evidence.insertions.map(|v| v as i64), - evidence.deletions.map(|v| v as i64), - evidence.review_iterations.map(|v| v as i64), - ], - ) - .await?; - Ok(()) - } - - /// Get evidence for a task. - pub async fn get(&self, task_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT commits, tests, files_changed, insertions, deletions, review_iters - FROM evidence WHERE task_id = ?1", - params![task_id.to_string()], - ) - .await?; - - let Some(row) = rows.next().await? else { - return Ok(None); - }; - - let commits_json: Option = row.get::>(0)?; - let tests_json: Option = row.get::>(1)?; - let files_changed: Option = row.get::>(2)?; - let insertions: Option = row.get::>(3)?; - let deletions: Option = row.get::>(4)?; - let review_iters: Option = row.get::>(5)?; - - let commits: Vec = commits_json - .map(|s| serde_json::from_str(&s)) - .transpose()? - .unwrap_or_default(); - let tests: Vec = tests_json - .map(|s| serde_json::from_str(&s)) - .transpose()? 
- .unwrap_or_default(); - - Ok(Some(Evidence { - commits, - tests, - prs: Vec::new(), - files_changed: files_changed.map(|v| v as u32), - insertions: insertions.map(|v| v as u32), - deletions: deletions.map(|v| v as u32), - review_iterations: review_iters.map(|v| v as u32), - workspace_changes: None, - })) - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/file_lock.rs b/flowctl/crates/flowctl-db/src/repo/file_lock.rs deleted file mode 100644 index 17f47d54..00000000 --- a/flowctl/crates/flowctl-db/src/repo/file_lock.rs +++ /dev/null @@ -1,331 +0,0 @@ -//! Async repository for runtime file locks (Teams mode concurrency). -//! -//! Uses PID-based crash detection + TTL fallback for hung processes. -//! Stale locks (dead PID or expired TTL) are auto-cleaned on `acquire()`. -//! -//! Lock modes: `write` (exclusive), `read` (shared with other reads), -//! `directory_add` (shared with reads and other directory_adds). - -use chrono::{Duration, Utc}; -use libsql::{params, Connection}; -use nix::sys::signal; -use nix::unistd::Pid; - -use crate::error::DbError; - -/// Default lock TTL: 45 minutes. -const LOCK_TTL_MINUTES: i64 = 45; - -/// Lock mode for file locking. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum LockMode { - Read, - Write, - DirectoryAdd, -} - -impl LockMode { - pub fn as_str(&self) -> &'static str { - match self { - LockMode::Read => "read", - LockMode::Write => "write", - LockMode::DirectoryAdd => "directory_add", - } - } - - pub fn from_str(s: &str) -> Result { - match s { - "read" => Ok(LockMode::Read), - "write" => Ok(LockMode::Write), - "directory_add" => Ok(LockMode::DirectoryAdd), - _ => Err(DbError::Schema(format!("invalid lock mode: {s}"))), - } - } - - /// Check if two lock modes are compatible (can coexist on the same file). 
- pub fn is_compatible(&self, other: &LockMode) -> bool { - matches!( - (self, other), - (LockMode::Read, LockMode::Read) - | (LockMode::Read, LockMode::DirectoryAdd) - | (LockMode::DirectoryAdd, LockMode::Read) - | (LockMode::DirectoryAdd, LockMode::DirectoryAdd) - ) - } -} - -/// A lock entry returned by `check_locks`. -#[derive(Debug, Clone)] -pub struct LockEntry { - pub task_id: String, - pub lock_mode: LockMode, -} - -/// Async repository for runtime file locks. Load-bearing for Teams-mode -/// concurrency: `acquire` on an incompatibly-locked file returns -/// `DbError::Constraint`. -pub struct FileLockRepo { - conn: Connection, -} - -impl FileLockRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Acquire a lock on a file for a task with a given mode. - /// - /// Calls `cleanup_stale()` first, then checks existing locks for - /// compatibility. Compatible locks (e.g. read+read) coexist; - /// incompatible locks return `DbError::Constraint`. - pub async fn acquire( - &self, - file_path: &str, - task_id: &str, - mode: &LockMode, - ) -> Result<(), DbError> { - self.cleanup_stale().await?; - - // Check existing locks on this file. - let existing = self.check_locks(file_path).await?; - - for entry in &existing { - if entry.task_id == task_id { - // Same task re-locking — idempotent. 
- return Ok(()); - } - if !mode.is_compatible(&entry.lock_mode) { - return Err(DbError::Constraint(format!( - "file already locked: {file_path} (by {} in {} mode)", - entry.task_id, - entry.lock_mode.as_str() - ))); - } - } - - let pid = std::process::id(); - let expires_at = (Utc::now() + Duration::minutes(LOCK_TTL_MINUTES)).to_rfc3339(); - - self.conn - .execute( - "INSERT OR IGNORE INTO file_locks (file_path, task_id, locked_at, holder_pid, expires_at, lock_mode) - VALUES (?1, ?2, ?3, ?4, ?5, ?6)", - params![ - file_path.to_string(), - task_id.to_string(), - Utc::now().to_rfc3339(), - pid as i64, - expires_at, - mode.as_str().to_string(), - ], - ) - .await?; - - Ok(()) - } - - /// Remove stale locks: dead PIDs (via `kill(pid, 0)`) and expired TTLs. - pub async fn cleanup_stale(&self) -> Result { - let mut total_cleaned = 0u64; - - // 1. Delete expired locks (expires_at < now). - let now = Utc::now().to_rfc3339(); - let expired = self - .conn - .execute( - "DELETE FROM file_locks WHERE expires_at IS NOT NULL AND expires_at < ?1", - params![now], - ) - .await?; - total_cleaned += expired; - - // 2. Check PIDs of remaining locks — delete dead ones. - let mut rows = self - .conn - .query( - "SELECT file_path, task_id, holder_pid FROM file_locks WHERE holder_pid IS NOT NULL", - (), - ) - .await?; - - let mut dead_keys: Vec<(String, String)> = Vec::new(); - while let Some(row) = rows.next().await? { - let fp: String = row.get(0)?; - let tid: String = row.get(1)?; - let pid: i64 = row.get(2)?; - - if !is_process_alive(pid as i32) { - dead_keys.push((fp, tid)); - } - } - - for (fp, tid) in &dead_keys { - let n = self - .conn - .execute( - "DELETE FROM file_locks WHERE file_path = ?1 AND task_id = ?2", - params![fp.clone(), tid.clone()], - ) - .await?; - total_cleaned += n; - } - - Ok(total_cleaned) - } - - /// Extend `expires_at` for all locks held by a task (heartbeat). 
- pub async fn heartbeat(&self, task_id: &str) -> Result { - let new_expires = (Utc::now() + Duration::minutes(LOCK_TTL_MINUTES)).to_rfc3339(); - let n = self - .conn - .execute( - "UPDATE file_locks SET expires_at = ?1 WHERE task_id = ?2", - params![new_expires, task_id.to_string()], - ) - .await?; - Ok(n) - } - - /// Release locks held by a task. Returns number of rows deleted. - pub async fn release_for_task(&self, task_id: &str) -> Result { - let n = self - .conn - .execute( - "DELETE FROM file_locks WHERE task_id = ?1", - params![task_id.to_string()], - ) - .await?; - Ok(n) - } - - /// Release all locks (between waves). Returns number of rows deleted. - pub async fn release_all(&self) -> Result { - let n = self.conn.execute("DELETE FROM file_locks", ()).await?; - Ok(n) - } - - /// Check all locks on a file. Returns list of (task_id, lock_mode) pairs. - pub async fn check_locks(&self, file_path: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT task_id, lock_mode FROM file_locks WHERE file_path = ?1", - params![file_path.to_string()], - ) - .await?; - - let mut entries = Vec::new(); - while let Some(row) = rows.next().await? { - let task_id: String = row.get(0)?; - let mode_str: String = row.get(1)?; - entries.push(LockEntry { - task_id, - lock_mode: LockMode::from_str(&mode_str)?, - }); - } - Ok(entries) - } - - /// Check if a file is locked. Returns the first locking task_id if so. - /// For backward compatibility — use `check_locks` for full info. - pub async fn check(&self, file_path: &str) -> Result, DbError> { - let entries = self.check_locks(file_path).await?; - Ok(entries.into_iter().next().map(|e| e.task_id)) - } -} - -/// Check if a process is alive using `kill(pid, 0)`. -fn is_process_alive(pid: i32) -> bool { - // kill(pid, 0) returns Ok if process exists, Err(ESRCH) if not. 
- signal::kill(Pid::from_raw(pid), None).is_ok() -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::pool::open_memory_async; - - #[tokio::test] - async fn lock_mode_compatibility() { - assert!(LockMode::Read.is_compatible(&LockMode::Read)); - assert!(LockMode::Read.is_compatible(&LockMode::DirectoryAdd)); - assert!(LockMode::DirectoryAdd.is_compatible(&LockMode::Read)); - assert!(LockMode::DirectoryAdd.is_compatible(&LockMode::DirectoryAdd)); - - assert!(!LockMode::Read.is_compatible(&LockMode::Write)); - assert!(!LockMode::Write.is_compatible(&LockMode::Read)); - assert!(!LockMode::Write.is_compatible(&LockMode::Write)); - assert!(!LockMode::Write.is_compatible(&LockMode::DirectoryAdd)); - assert!(!LockMode::DirectoryAdd.is_compatible(&LockMode::Write)); - } - - #[tokio::test] - async fn acquire_write_write_conflicts() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn); - - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - let err = repo.acquire("src/a.rs", "fn-1.2", &LockMode::Write).await.unwrap_err(); - assert!(matches!(err, DbError::Constraint(_))); - } - - #[tokio::test] - async fn acquire_read_read_compatible() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn); - - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Read).await.unwrap(); - repo.acquire("src/a.rs", "fn-1.2", &LockMode::Read).await.unwrap(); - - let entries = repo.check_locks("src/a.rs").await.unwrap(); - assert_eq!(entries.len(), 2); - } - - #[tokio::test] - async fn acquire_read_write_conflicts() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn); - - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Read).await.unwrap(); - let err = repo.acquire("src/a.rs", "fn-1.2", &LockMode::Write).await.unwrap_err(); - assert!(matches!(err, DbError::Constraint(_))); - } - - #[tokio::test] - async fn acquire_directory_add_compatible() { - let (_db, 
conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn); - - repo.acquire("src/", "fn-1.1", &LockMode::DirectoryAdd).await.unwrap(); - repo.acquire("src/", "fn-1.2", &LockMode::DirectoryAdd).await.unwrap(); - repo.acquire("src/", "fn-1.3", &LockMode::Read).await.unwrap(); - - let entries = repo.check_locks("src/").await.unwrap(); - assert_eq!(entries.len(), 3); - - // Write conflicts with directory_add - let err = repo.acquire("src/", "fn-1.4", &LockMode::Write).await.unwrap_err(); - assert!(matches!(err, DbError::Constraint(_))); - } - - #[tokio::test] - async fn acquire_idempotent_same_task() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn); - - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - assert_eq!(repo.check("src/a.rs").await.unwrap().as_deref(), Some("fn-1.1")); - } - - #[tokio::test] - async fn release_and_reacquire() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn); - - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - repo.release_for_task("fn-1.1").await.unwrap(); - repo.acquire("src/a.rs", "fn-1.2", &LockMode::Write).await.unwrap(); - assert_eq!(repo.check("src/a.rs").await.unwrap().as_deref(), Some("fn-1.2")); - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/file_ownership.rs b/flowctl/crates/flowctl-db/src/repo/file_ownership.rs deleted file mode 100644 index 92e652aa..00000000 --- a/flowctl/crates/flowctl-db/src/repo/file_ownership.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! Async repository for file ownership edges. - -use libsql::{params, Connection}; - -use crate::error::DbError; - -/// Async repository for file ownership edges. 
-pub struct FileOwnershipRepo { - conn: Connection, -} - -impl FileOwnershipRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - pub async fn add(&self, file_path: &str, task_id: &str) -> Result<(), DbError> { - self.conn - .execute( - "INSERT OR IGNORE INTO file_ownership (file_path, task_id) VALUES (?1, ?2)", - params![file_path.to_string(), task_id.to_string()], - ) - .await?; - Ok(()) - } - - pub async fn remove(&self, file_path: &str, task_id: &str) -> Result<(), DbError> { - self.conn - .execute( - "DELETE FROM file_ownership WHERE file_path = ?1 AND task_id = ?2", - params![file_path.to_string(), task_id.to_string()], - ) - .await?; - Ok(()) - } - - pub async fn list_for_task(&self, task_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT file_path FROM file_ownership WHERE task_id = ?1 ORDER BY file_path", - params![task_id.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row.get::(0)?); - } - Ok(out) - } - - pub async fn list_for_file(&self, file_path: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT task_id FROM file_ownership WHERE file_path = ?1 ORDER BY task_id", - params![file_path.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row.get::(0)?); - } - Ok(out) - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/gap.rs b/flowctl/crates/flowctl-db/src/repo/gap.rs deleted file mode 100644 index c18e91cc..00000000 --- a/flowctl/crates/flowctl-db/src/repo/gap.rs +++ /dev/null @@ -1,158 +0,0 @@ -//! Async repository for the gaps registry. - -use libsql::{params, Connection}; - -use crate::error::DbError; - -/// A row from the gaps table. 
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct GapRow { - pub id: i64, - pub epic_id: String, - pub capability: String, - pub priority: String, - pub source: Option, - pub status: String, - pub resolved_at: Option, - pub evidence: Option, - pub task_id: Option, - pub created_at: String, -} - -/// Async repository for the gaps registry. -pub struct GapRepo { - conn: Connection, -} - -impl GapRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Add a new gap. - pub async fn add( - &self, - epic_id: &str, - capability: &str, - priority: &str, - source: Option<&str>, - task_id: Option<&str>, - ) -> Result { - self.conn - .execute( - "INSERT INTO gaps (epic_id, capability, priority, source, task_id) - VALUES (?1, ?2, ?3, ?4, ?5)", - params![ - epic_id.to_string(), - capability.to_string(), - priority.to_string(), - source.map(std::string::ToString::to_string), - task_id.map(std::string::ToString::to_string), - ], - ) - .await?; - - // Return the last inserted rowid. - let mut rows = self - .conn - .query("SELECT last_insert_rowid()", ()) - .await?; - let row = rows.next().await?.ok_or_else(|| { - DbError::NotFound("last_insert_rowid".to_string()) - })?; - Ok(row.get::(0)?) - } - - /// List gaps for an epic, optionally filtered by status. - pub async fn list( - &self, - epic_id: &str, - status: Option<&str>, - ) -> Result, DbError> { - let mut rows = match status { - Some(s) => { - self.conn - .query( - "SELECT id, epic_id, capability, priority, source, status, resolved_at, evidence, task_id, created_at - FROM gaps WHERE epic_id = ?1 AND status = ?2 ORDER BY id", - params![epic_id.to_string(), s.to_string()], - ) - .await? - } - None => { - self.conn - .query( - "SELECT id, epic_id, capability, priority, source, status, resolved_at, evidence, task_id, created_at - FROM gaps WHERE epic_id = ?1 ORDER BY id", - params![epic_id.to_string()], - ) - .await? 
- } - }; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(GapRow { - id: row.get::(0)?, - epic_id: row.get::(1)?, - capability: row.get::(2)?, - priority: row.get::(3)?, - source: row.get::>(4)?, - status: row.get::(5)?, - resolved_at: row.get::>(6)?, - evidence: row.get::>(7)?, - task_id: row.get::>(8)?, - created_at: row.get::(9)?, - }); - } - Ok(out) - } - - /// Remove a gap by ID. - pub async fn remove(&self, id: i64) -> Result<(), DbError> { - self.conn - .execute("DELETE FROM gaps WHERE id = ?1", params![id]) - .await?; - Ok(()) - } - - /// Remove all gaps for an epic. - pub async fn remove_all(&self, epic_id: &str) -> Result { - let n = self - .conn - .execute( - "DELETE FROM gaps WHERE epic_id = ?1", - params![epic_id.to_string()], - ) - .await?; - Ok(n) - } - - /// Resolve a gap by ID. - pub async fn resolve(&self, id: i64, evidence: &str) -> Result<(), DbError> { - self.conn - .execute( - "UPDATE gaps SET status = 'resolved', resolved_at = datetime('now'), evidence = ?1 WHERE id = ?2", - params![evidence.to_string(), id], - ) - .await?; - Ok(()) - } - - /// Resolve a gap by capability name within an epic. - pub async fn resolve_by_capability( - &self, - epic_id: &str, - capability: &str, - evidence: &str, - ) -> Result<(), DbError> { - self.conn - .execute( - "UPDATE gaps SET status = 'resolved', resolved_at = datetime('now'), evidence = ?1 - WHERE epic_id = ?2 AND capability = ?3 AND status = 'open'", - params![evidence.to_string(), epic_id.to_string(), capability.to_string()], - ) - .await?; - Ok(()) - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/helpers.rs b/flowctl/crates/flowctl-db/src/repo/helpers.rs deleted file mode 100644 index f46c5724..00000000 --- a/flowctl/crates/flowctl-db/src/repo/helpers.rs +++ /dev/null @@ -1,107 +0,0 @@ -//! Parsing helpers and ID-extraction utilities shared across repo sub-modules. 
- -use chrono::{DateTime, Utc}; -use libsql::{params, Connection}; - -use flowctl_core::state_machine::Status; -use flowctl_core::types::{Domain, EpicStatus, ReviewStatus}; - -use crate::error::DbError; - -// ── Parsing helpers ───────────────────────────────────────────────── - -pub(crate) fn parse_status(s: &str) -> Status { - Status::parse(s).unwrap_or_default() -} - -pub(crate) fn parse_epic_status(s: &str) -> EpicStatus { - match s { - "done" => EpicStatus::Done, - _ => EpicStatus::Open, - } -} - -pub(crate) fn parse_review_status(s: &str) -> ReviewStatus { - match s { - "passed" => ReviewStatus::Passed, - "failed" => ReviewStatus::Failed, - _ => ReviewStatus::Unknown, - } -} - -pub(crate) fn parse_domain(s: &str) -> Domain { - match s { - "frontend" => Domain::Frontend, - "backend" => Domain::Backend, - "architecture" => Domain::Architecture, - "testing" => Domain::Testing, - "docs" => Domain::Docs, - "ops" => Domain::Ops, - _ => Domain::General, - } -} - -pub(crate) fn parse_datetime(s: &str) -> DateTime { - DateTime::parse_from_rfc3339(s) - .map(|dt| dt.with_timezone(&Utc)) - .unwrap_or_else(|_| Utc::now()) -} - -// ── Max-ID queries ───────────────────────────────────────────────── - -/// Extract the maximum epic number from existing epic IDs. -/// Epic IDs follow the format `fn-N-slug`, where N is the number. -pub async fn max_epic_num(conn: &Connection) -> Result { - let mut rows = conn - .query("SELECT id FROM epics", ()) - .await?; - - let mut max_n: i64 = 0; - while let Some(row) = rows.next().await? { - let id: String = row.get(0)?; - if let Some(n) = parse_epic_number(&id) { - if n > max_n { - max_n = n; - } - } - } - Ok(max_n) -} - -/// Extract the maximum task number for a given epic. -/// Task IDs follow the format `.N`. 
-pub async fn max_task_num(conn: &Connection, epic_id: &str) -> Result { - let mut rows = conn - .query( - "SELECT id FROM tasks WHERE epic_id = ?1", - params![epic_id.to_string()], - ) - .await?; - - let mut max_n: i64 = 0; - while let Some(row) = rows.next().await? { - let id: String = row.get(0)?; - if let Some(n) = parse_task_number(&id) { - if n > max_n { - max_n = n; - } - } - } - Ok(max_n) -} - -/// Parse the numeric portion from an epic ID (fn-N or fn-N-slug). -fn parse_epic_number(id: &str) -> Option { - let parts: Vec<&str> = id.splitn(3, '-').collect(); - if parts.len() >= 2 && parts[0] == "fn" { - parts[1].parse::().ok() - } else { - None - } -} - -/// Parse the task number from a task ID (.N). -fn parse_task_number(id: &str) -> Option { - let dot_pos = id.rfind('.')?; - id[dot_pos + 1..].parse::().ok() -} diff --git a/flowctl/crates/flowctl-db/src/repo/mod.rs b/flowctl/crates/flowctl-db/src/repo/mod.rs deleted file mode 100644 index aa5407dd..00000000 --- a/flowctl/crates/flowctl-db/src/repo/mod.rs +++ /dev/null @@ -1,635 +0,0 @@ -//! Async repository abstractions over libSQL. -//! -//! Ported from `flowctl-db::repo` (sync rusqlite). Each repo owns a -//! `libsql::Connection` (cheap Clone) and exposes async methods that -//! return `DbError`. Mirrors the sync API surface where it makes sense. 
- -mod deps; -mod epic; -mod event; -mod event_store; -mod evidence; -mod file_lock; -mod file_ownership; -mod gap; -pub(crate) mod helpers; -mod phase_progress; -mod runtime; -mod scout_cache; -mod task; - -pub use deps::DepRepo; -pub use epic::EpicRepo; -pub use event::{EventRepo, EventRow}; -pub use event_store::{EventStoreRepo, StoredEvent}; -pub use evidence::EvidenceRepo; -pub use file_lock::{FileLockRepo, LockEntry, LockMode}; -pub use file_ownership::FileOwnershipRepo; -pub use gap::{GapRepo, GapRow}; -pub use helpers::{max_epic_num, max_task_num}; -pub use phase_progress::PhaseProgressRepo; -pub use runtime::RuntimeRepo; -pub use scout_cache::ScoutCacheRepo; -pub use task::TaskRepo; - -#[cfg(test)] -mod tests { - use super::*; - use crate::error::DbError; - use crate::pool::open_memory_async; - use chrono::Utc; - use flowctl_core::types::{Domain, Epic, EpicStatus, Evidence, ReviewStatus, RuntimeState, Task}; - use flowctl_core::state_machine::Status; - - fn sample_epic(id: &str) -> Epic { - let now = Utc::now(); - Epic { - schema_version: 1, - id: id.to_string(), - title: format!("Title of {id}"), - status: EpicStatus::Open, - branch_name: Some("feat/x".to_string()), - plan_review: ReviewStatus::Unknown, - completion_review: ReviewStatus::Unknown, - depends_on_epics: Vec::new(), - default_impl: None, - default_review: None, - default_sync: None, - auto_execute_pending: None, - auto_execute_set_at: None, - archived: false, - file_path: Some(format!("epics/{id}.md")), - created_at: now, - updated_at: now, - } - } - - fn sample_task(epic: &str, id: &str) -> Task { - let now = Utc::now(); - Task { - schema_version: 1, - id: id.to_string(), - epic: epic.to_string(), - title: format!("Task {id}"), - status: Status::Todo, - priority: Some(1), - domain: Domain::Backend, - depends_on: Vec::new(), - files: Vec::new(), - r#impl: None, - review: None, - sync: None, - file_path: Some(format!("tasks/{id}.md")), - created_at: now, - updated_at: now, - } - } - - 
#[tokio::test] - async fn epic_upsert_get_roundtrip() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EpicRepo::new(conn.clone()); - - let e = sample_epic("fn-1-test"); - repo.upsert(&e).await.unwrap(); - - let got = repo.get("fn-1-test").await.unwrap(); - assert_eq!(got.id, "fn-1-test"); - assert_eq!(got.title, "Title of fn-1-test"); - assert_eq!(got.branch_name.as_deref(), Some("feat/x")); - assert!(matches!(got.status, EpicStatus::Open)); - } - - #[tokio::test] - async fn epic_upsert_with_body_preserves() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EpicRepo::new(conn.clone()); - let e = sample_epic("fn-2-body"); - - repo.upsert_with_body(&e, "# Body v1").await.unwrap(); - let (_, body) = repo.get_with_body("fn-2-body").await.unwrap(); - assert_eq!(body, "# Body v1"); - - // Empty body preserves existing. - repo.upsert_with_body(&e, "").await.unwrap(); - let (_, body2) = repo.get_with_body("fn-2-body").await.unwrap(); - assert_eq!(body2, "# Body v1"); - - // Non-empty overwrites. 
- repo.upsert_with_body(&e, "# Body v2").await.unwrap(); - let (_, body3) = repo.get_with_body("fn-2-body").await.unwrap(); - assert_eq!(body3, "# Body v2"); - } - - #[tokio::test] - async fn epic_list_and_update_status_and_delete() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EpicRepo::new(conn.clone()); - - repo.upsert(&sample_epic("fn-a")).await.unwrap(); - repo.upsert(&sample_epic("fn-b")).await.unwrap(); - - let all = repo.list(None).await.unwrap(); - assert_eq!(all.len(), 2); - - repo.update_status("fn-a", EpicStatus::Done).await.unwrap(); - let done = repo.list(Some("done")).await.unwrap(); - assert_eq!(done.len(), 1); - assert_eq!(done[0].id, "fn-a"); - - repo.delete("fn-b").await.unwrap(); - let remaining = repo.list(None).await.unwrap(); - assert_eq!(remaining.len(), 1); - - let err = repo.get("nope").await.unwrap_err(); - assert!(matches!(err, DbError::NotFound(_))); - } - - #[tokio::test] - async fn epic_get_missing_is_not_found() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EpicRepo::new(conn.clone()); - let err = repo.get("does-not-exist").await.unwrap_err(); - assert!(matches!(err, DbError::NotFound(_))); - } - - #[tokio::test] - async fn task_upsert_get_with_deps_and_files() { - let (_db, conn) = open_memory_async().await.unwrap(); - let erepo = EpicRepo::new(conn.clone()); - erepo.upsert(&sample_epic("fn-1")).await.unwrap(); - - let trepo = TaskRepo::new(conn.clone()); - let mut t = sample_task("fn-1", "fn-1.1"); - t.depends_on = vec!["fn-1.0".to_string()]; - t.files = vec!["src/a.rs".to_string(), "src/b.rs".to_string()]; - trepo.upsert(&t).await.unwrap(); - - let got = trepo.get("fn-1.1").await.unwrap(); - assert_eq!(got.epic, "fn-1"); - assert_eq!(got.priority, Some(1)); - assert!(matches!(got.domain, Domain::Backend)); - assert_eq!(got.depends_on, vec!["fn-1.0".to_string()]); - assert_eq!(got.files.len(), 2); - assert!(got.files.contains(&"src/a.rs".to_string())); - } - - #[tokio::test] - async 
fn task_list_by_epic_status_domain() { - let (_db, conn) = open_memory_async().await.unwrap(); - let erepo = EpicRepo::new(conn.clone()); - erepo.upsert(&sample_epic("fn-1")).await.unwrap(); - erepo.upsert(&sample_epic("fn-2")).await.unwrap(); - - let trepo = TaskRepo::new(conn.clone()); - let mut t1 = sample_task("fn-1", "fn-1.1"); - let mut t2 = sample_task("fn-1", "fn-1.2"); - t2.domain = Domain::Frontend; - let t3 = sample_task("fn-2", "fn-2.1"); - trepo.upsert(&t1).await.unwrap(); - trepo.upsert(&t2).await.unwrap(); - trepo.upsert(&t3).await.unwrap(); - - let ep1 = trepo.list_by_epic("fn-1").await.unwrap(); - assert_eq!(ep1.len(), 2); - - let all = trepo.list_all(None, None).await.unwrap(); - assert_eq!(all.len(), 3); - - let fe = trepo.list_all(None, Some("frontend")).await.unwrap(); - assert_eq!(fe.len(), 1); - assert_eq!(fe[0].id, "fn-1.2"); - - t1.status = Status::Done; - trepo.upsert(&t1).await.unwrap(); - let done = trepo.list_by_status(Status::Done).await.unwrap(); - assert_eq!(done.len(), 1); - - let todo_fe = trepo - .list_all(Some("todo"), Some("frontend")) - .await - .unwrap(); - assert_eq!(todo_fe.len(), 1); - } - - #[tokio::test] - async fn task_update_status_and_delete() { - let (_db, conn) = open_memory_async().await.unwrap(); - EpicRepo::new(conn.clone()) - .upsert(&sample_epic("fn-1")) - .await - .unwrap(); - - let trepo = TaskRepo::new(conn.clone()); - let mut t = sample_task("fn-1", "fn-1.1"); - t.depends_on = vec!["fn-1.0".to_string()]; - t.files = vec!["src/a.rs".to_string()]; - trepo.upsert(&t).await.unwrap(); - - trepo - .update_status("fn-1.1", Status::InProgress) - .await - .unwrap(); - let got = trepo.get("fn-1.1").await.unwrap(); - assert!(matches!(got.status, Status::InProgress)); - - trepo.delete("fn-1.1").await.unwrap(); - assert!(matches!( - trepo.get("fn-1.1").await.unwrap_err(), - DbError::NotFound(_) - )); - - // Update missing -> NotFound. 
- let err = trepo - .update_status("missing", Status::Done) - .await - .unwrap_err(); - assert!(matches!(err, DbError::NotFound(_))); - } - - #[tokio::test] - async fn dep_repo_add_list_remove() { - let (_db, conn) = open_memory_async().await.unwrap(); - let deps = DepRepo::new(conn.clone()); - - deps.add_task_dep("fn-1.2", "fn-1.1").await.unwrap(); - deps.add_task_dep("fn-1.2", "fn-1.0").await.unwrap(); - // Idempotent. - deps.add_task_dep("fn-1.2", "fn-1.1").await.unwrap(); - - let mut got = deps.list_task_deps("fn-1.2").await.unwrap(); - got.sort(); - assert_eq!(got, vec!["fn-1.0".to_string(), "fn-1.1".to_string()]); - - deps.remove_task_dep("fn-1.2", "fn-1.1").await.unwrap(); - let after = deps.list_task_deps("fn-1.2").await.unwrap(); - assert_eq!(after, vec!["fn-1.0".to_string()]); - - deps.add_epic_dep("fn-2", "fn-1").await.unwrap(); - deps.add_epic_dep("fn-2", "fn-0").await.unwrap(); - let mut elist = deps.list_epic_deps("fn-2").await.unwrap(); - elist.sort(); - assert_eq!(elist, vec!["fn-0".to_string(), "fn-1".to_string()]); - - deps.remove_epic_dep("fn-2", "fn-0").await.unwrap(); - assert_eq!( - deps.list_epic_deps("fn-2").await.unwrap(), - vec!["fn-1".to_string()] - ); - } - - #[tokio::test] - async fn dep_repo_reverse_deps_and_transitive() { - let (_db, conn) = open_memory_async().await.unwrap(); - let deps = DepRepo::new(conn.clone()); - - // Build chain: fn-1.1 -> fn-1.2 -> fn-1.3, fn-1.1 -> fn-1.4 - deps.add_task_dep("fn-1.2", "fn-1.1").await.unwrap(); - deps.add_task_dep("fn-1.3", "fn-1.2").await.unwrap(); - deps.add_task_dep("fn-1.4", "fn-1.1").await.unwrap(); - - // Direct dependents of fn-1.1: fn-1.2 and fn-1.4 - let direct = deps.list_dependents("fn-1.1").await.unwrap(); - assert_eq!(direct, vec!["fn-1.2".to_string(), "fn-1.4".to_string()]); - - // Direct dependents of fn-1.2: fn-1.3 - let direct2 = deps.list_dependents("fn-1.2").await.unwrap(); - assert_eq!(direct2, vec!["fn-1.3".to_string()]); - - // No dependents of fn-1.3 - let direct3 = 
deps.list_dependents("fn-1.3").await.unwrap(); - assert!(direct3.is_empty()); - - // Transitive dependents of fn-1.1: fn-1.2, fn-1.3, fn-1.4 - let all = deps.list_all_dependents("fn-1.1").await.unwrap(); - assert_eq!( - all, - vec!["fn-1.2".to_string(), "fn-1.3".to_string(), "fn-1.4".to_string()] - ); - - // Transitive dependents of fn-1.2: fn-1.3 - let all2 = deps.list_all_dependents("fn-1.2").await.unwrap(); - assert_eq!(all2, vec!["fn-1.3".to_string()]); - - // Remove fn-1.2 -> fn-1.1 dep: reverse index should update - deps.remove_task_dep("fn-1.2", "fn-1.1").await.unwrap(); - let after = deps.list_dependents("fn-1.1").await.unwrap(); - assert_eq!(after, vec!["fn-1.4".to_string()]); - - // Transitive from fn-1.1 no longer includes fn-1.2 or fn-1.3 - let all_after = deps.list_all_dependents("fn-1.1").await.unwrap(); - assert_eq!(all_after, vec!["fn-1.4".to_string()]); - } - - #[tokio::test] - async fn file_ownership_repo_roundtrip() { - let (_db, conn) = open_memory_async().await.unwrap(); - let f = FileOwnershipRepo::new(conn.clone()); - - f.add("src/a.rs", "fn-1.1").await.unwrap(); - f.add("src/b.rs", "fn-1.1").await.unwrap(); - f.add("src/a.rs", "fn-1.2").await.unwrap(); - // Idempotent. 
- f.add("src/a.rs", "fn-1.1").await.unwrap(); - - let mut t1 = f.list_for_task("fn-1.1").await.unwrap(); - t1.sort(); - assert_eq!(t1, vec!["src/a.rs".to_string(), "src/b.rs".to_string()]); - - let mut owners = f.list_for_file("src/a.rs").await.unwrap(); - owners.sort(); - assert_eq!(owners, vec!["fn-1.1".to_string(), "fn-1.2".to_string()]); - - f.remove("src/a.rs", "fn-1.2").await.unwrap(); - let owners2 = f.list_for_file("src/a.rs").await.unwrap(); - assert_eq!(owners2, vec!["fn-1.1".to_string()]); - } - - // ── RuntimeRepo ───────────────────────────────────────────────── - - #[tokio::test] - async fn runtime_upsert_get_roundtrip() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = RuntimeRepo::new(conn.clone()); - let now = Utc::now(); - let state = RuntimeState { - task_id: "fn-1.1".to_string(), - assignee: Some("worker-1".to_string()), - claimed_at: Some(now), - completed_at: None, - duration_secs: Some(42), - blocked_reason: None, - baseline_rev: Some("abc123".to_string()), - final_rev: None, - retry_count: 2, - }; - repo.upsert(&state).await.unwrap(); - - let got = repo.get("fn-1.1").await.unwrap().expect("should exist"); - assert_eq!(got.task_id, "fn-1.1"); - assert_eq!(got.assignee.as_deref(), Some("worker-1")); - assert_eq!(got.duration_secs, Some(42)); - assert_eq!(got.baseline_rev.as_deref(), Some("abc123")); - assert_eq!(got.retry_count, 2); - assert!(got.claimed_at.is_some()); - assert!(got.completed_at.is_none()); - - // Update (upsert) the same task. 
- let updated = RuntimeState { - retry_count: 3, - final_rev: Some("def456".to_string()), - ..state - }; - repo.upsert(&updated).await.unwrap(); - let got2 = repo.get("fn-1.1").await.unwrap().unwrap(); - assert_eq!(got2.retry_count, 3); - assert_eq!(got2.final_rev.as_deref(), Some("def456")); - } - - #[tokio::test] - async fn runtime_get_missing_returns_none() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = RuntimeRepo::new(conn.clone()); - assert!(repo.get("does-not-exist").await.unwrap().is_none()); - } - - // ── EvidenceRepo ──────────────────────────────────────────────── - - #[tokio::test] - async fn evidence_upsert_get_roundtrip() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EvidenceRepo::new(conn.clone()); - let ev = Evidence { - commits: vec!["abc123".to_string(), "def456".to_string()], - tests: vec!["cargo test".to_string(), "bash smoke.sh".to_string()], - prs: Vec::new(), - files_changed: Some(5), - insertions: Some(120), - deletions: Some(30), - review_iterations: Some(1), - workspace_changes: None, - }; - repo.upsert("fn-1.1", &ev).await.unwrap(); - - let got = repo.get("fn-1.1").await.unwrap().expect("should exist"); - assert_eq!(got.commits, vec!["abc123".to_string(), "def456".to_string()]); - assert_eq!( - got.tests, - vec!["cargo test".to_string(), "bash smoke.sh".to_string()] - ); - assert_eq!(got.files_changed, Some(5)); - assert_eq!(got.insertions, Some(120)); - assert_eq!(got.deletions, Some(30)); - assert_eq!(got.review_iterations, Some(1)); - } - - #[tokio::test] - async fn evidence_get_missing_returns_none() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EvidenceRepo::new(conn.clone()); - assert!(repo.get("nope").await.unwrap().is_none()); - } - - #[tokio::test] - async fn evidence_empty_vecs_roundtrip() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = EvidenceRepo::new(conn.clone()); - let ev = Evidence { - commits: Vec::new(), - tests: Vec::new(), 
- prs: Vec::new(), - files_changed: None, - insertions: None, - deletions: None, - review_iterations: None, - workspace_changes: None, - }; - repo.upsert("fn-2.1", &ev).await.unwrap(); - let got = repo.get("fn-2.1").await.unwrap().unwrap(); - assert!(got.commits.is_empty()); - assert!(got.tests.is_empty()); - assert_eq!(got.files_changed, None); - } - - // ── FileLockRepo ──────────────────────────────────────────────── - - #[tokio::test] - async fn file_lock_acquire_twice_conflicts() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn.clone()); - - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - let err = repo.acquire("src/a.rs", "fn-1.2", &LockMode::Write).await.unwrap_err(); - assert!( - matches!(err, DbError::Constraint(_)), - "expected Constraint, got {err:?}" - ); - } - - #[tokio::test] - async fn file_lock_release_for_task_and_check() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn.clone()); - - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - repo.acquire("src/b.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - repo.acquire("src/c.rs", "fn-1.2", &LockMode::Write).await.unwrap(); - - assert_eq!( - repo.check("src/a.rs").await.unwrap().as_deref(), - Some("fn-1.1") - ); - assert!(repo.check("src/missing.rs").await.unwrap().is_none()); - - let n = repo.release_for_task("fn-1.1").await.unwrap(); - assert_eq!(n, 2); - assert!(repo.check("src/a.rs").await.unwrap().is_none()); - assert!(repo.check("src/b.rs").await.unwrap().is_none()); - // fn-1.2 still holds its lock. - assert_eq!( - repo.check("src/c.rs").await.unwrap().as_deref(), - Some("fn-1.2") - ); - - // Re-acquiring a released file works. - repo.acquire("src/a.rs", "fn-1.3", &LockMode::Write).await.unwrap(); - assert_eq!( - repo.check("src/a.rs").await.unwrap().as_deref(), - Some("fn-1.3") - ); - - // release_all clears remaining locks. 
- let n2 = repo.release_all().await.unwrap(); - assert_eq!(n2, 2); - assert!(repo.check("src/a.rs").await.unwrap().is_none()); - assert!(repo.check("src/c.rs").await.unwrap().is_none()); - } - - #[tokio::test] - async fn file_lock_idempotent_reacquire() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn.clone()); - - // Acquiring the same file for the same task twice should succeed (idempotent). - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - assert_eq!( - repo.check("src/a.rs").await.unwrap().as_deref(), - Some("fn-1.1") - ); - } - - #[tokio::test] - async fn file_lock_expired_cleanup() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn.clone()); - - // Insert a lock with an already-expired TTL directly. - let past = (chrono::Utc::now() - chrono::Duration::minutes(1)).to_rfc3339(); - conn.execute( - "INSERT INTO file_locks (file_path, task_id, locked_at, holder_pid, expires_at) - VALUES ('src/expired.rs', 'fn-old', ?1, 99999, ?2)", - libsql::params![past.clone(), past], - ) - .await - .unwrap(); - - // The expired lock should be visible before cleanup. - assert!(repo.check("src/expired.rs").await.unwrap().is_some()); - - // cleanup_stale should remove it. - let cleaned = repo.cleanup_stale().await.unwrap(); - assert!(cleaned >= 1); - assert!(repo.check("src/expired.rs").await.unwrap().is_none()); - } - - #[tokio::test] - async fn file_lock_heartbeat_extends_ttl() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn.clone()); - - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - repo.acquire("src/b.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - - let extended = repo.heartbeat("fn-1.1").await.unwrap(); - assert_eq!(extended, 2); - - // Heartbeat on a non-existent task returns 0. 
- let none = repo.heartbeat("fn-nonexistent").await.unwrap(); - assert_eq!(none, 0); - } - - #[tokio::test] - async fn file_lock_acquire_cleans_expired_before_insert() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = FileLockRepo::new(conn.clone()); - - // Insert expired lock for a file. - let past = (chrono::Utc::now() - chrono::Duration::minutes(1)).to_rfc3339(); - conn.execute( - "INSERT INTO file_locks (file_path, task_id, locked_at, holder_pid, expires_at) - VALUES ('src/a.rs', 'fn-old', ?1, 99999, ?2)", - libsql::params![past.clone(), past], - ) - .await - .unwrap(); - - // Acquiring the same file should succeed because the old lock is expired. - repo.acquire("src/a.rs", "fn-1.1", &LockMode::Write).await.unwrap(); - assert_eq!( - repo.check("src/a.rs").await.unwrap().as_deref(), - Some("fn-1.1") - ); - } - - // ── PhaseProgressRepo ─────────────────────────────────────────── - - #[tokio::test] - async fn event_repo_insert_list_by_epic_and_type() { - let (_db, conn) = open_memory_async().await.unwrap(); - // Need an epic row since events.epic_id is TEXT NOT NULL (no FK but we'll be honest). - conn.execute( - "INSERT INTO epics (id, title, status, file_path, created_at, updated_at) - VALUES ('fn-9-evt', 'Evt Test', 'open', 'e.md', '2025-01-01T00:00:00Z', '2025-01-01T00:00:00Z')", - (), - ).await.unwrap(); - - let repo = EventRepo::new(conn.clone()); - let id1 = repo.insert("fn-9-evt", Some("fn-9-evt.1"), "task_started", Some("w1"), None, None).await.unwrap(); - let id2 = repo.insert("fn-9-evt", Some("fn-9-evt.1"), "task_completed", Some("w1"), Some("{}"), None).await.unwrap(); - let id3 = repo.insert("fn-9-evt", Some("fn-9-evt.2"), "task_started", Some("w1"), None, None).await.unwrap(); - assert!(id1 > 0 && id2 > id1 && id3 > id2); - - let by_epic = repo.list_by_epic("fn-9-evt", 10).await.unwrap(); - assert_eq!(by_epic.len(), 3); - // Most recent first. 
- assert_eq!(by_epic[0].id, id3); - - let started = repo.list_by_type("task_started", 10).await.unwrap(); - assert_eq!(started.len(), 2); - let completed = repo.list_by_type("task_completed", 10).await.unwrap(); - assert_eq!(completed.len(), 1); - assert_eq!(completed[0].payload.as_deref(), Some("{}")); - } - - #[tokio::test] - async fn phase_progress_mark_done_and_get() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = PhaseProgressRepo::new(conn.clone()); - - repo.mark_done("fn-1.1", "plan").await.unwrap(); - repo.mark_done("fn-1.1", "implement").await.unwrap(); - - let phases = repo.get_completed("fn-1.1").await.unwrap(); - assert_eq!(phases, vec!["plan".to_string(), "implement".to_string()]); - - // Idempotent re-mark. - repo.mark_done("fn-1.1", "plan").await.unwrap(); - let phases2 = repo.get_completed("fn-1.1").await.unwrap(); - assert_eq!(phases2.len(), 2); - - let n = repo.reset("fn-1.1").await.unwrap(); - assert_eq!(n, 2); - assert!(repo.get_completed("fn-1.1").await.unwrap().is_empty()); - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/phase_progress.rs b/flowctl/crates/flowctl-db/src/repo/phase_progress.rs deleted file mode 100644 index c881801d..00000000 --- a/flowctl/crates/flowctl-db/src/repo/phase_progress.rs +++ /dev/null @@ -1,64 +0,0 @@ -//! Async repository for worker-phase progress tracking. - -use chrono::Utc; -use libsql::{params, Connection}; - -use crate::error::DbError; - -/// Async repository for worker-phase progress tracking. -pub struct PhaseProgressRepo { - conn: Connection, -} - -impl PhaseProgressRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Get all completed phases for a task, in rowid (insertion) order. 
- pub async fn get_completed(&self, task_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT phase FROM phase_progress WHERE task_id = ?1 AND status = 'done' ORDER BY rowid", - params![task_id.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row.get::(0)?); - } - Ok(out) - } - - /// Mark a phase as done. - pub async fn mark_done(&self, task_id: &str, phase: &str) -> Result<(), DbError> { - self.conn - .execute( - "INSERT INTO phase_progress (task_id, phase, status, completed_at) - VALUES (?1, ?2, 'done', ?3) - ON CONFLICT(task_id, phase) DO UPDATE SET - status = 'done', - completed_at = excluded.completed_at", - params![ - task_id.to_string(), - phase.to_string(), - Utc::now().to_rfc3339(), - ], - ) - .await?; - Ok(()) - } - - /// Reset all phase progress for a task. Returns number of rows deleted. - pub async fn reset(&self, task_id: &str) -> Result { - let n = self - .conn - .execute( - "DELETE FROM phase_progress WHERE task_id = ?1", - params![task_id.to_string()], - ) - .await?; - Ok(n) - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/runtime.rs b/flowctl/crates/flowctl-db/src/repo/runtime.rs deleted file mode 100644 index 4f5f5b20..00000000 --- a/flowctl/crates/flowctl-db/src/repo/runtime.rs +++ /dev/null @@ -1,84 +0,0 @@ -//! Async repository for per-task runtime state (Teams mode assignment, timing). - -use libsql::{params, Connection}; - -use flowctl_core::types::RuntimeState; - -use crate::error::DbError; - -use super::helpers::parse_datetime; - -/// Async repository for per-task runtime state (Teams mode assignment, timing). -pub struct RuntimeRepo { - conn: Connection, -} - -impl RuntimeRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Upsert runtime state for a task. 
- pub async fn upsert(&self, state: &RuntimeState) -> Result<(), DbError> { - self.conn - .execute( - "INSERT INTO runtime_state (task_id, assignee, claimed_at, completed_at, duration_secs, blocked_reason, baseline_rev, final_rev, retry_count) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9) - ON CONFLICT(task_id) DO UPDATE SET - assignee = excluded.assignee, - claimed_at = excluded.claimed_at, - completed_at = excluded.completed_at, - duration_secs = excluded.duration_secs, - blocked_reason = excluded.blocked_reason, - baseline_rev = excluded.baseline_rev, - final_rev = excluded.final_rev, - retry_count = excluded.retry_count", - params![ - state.task_id.clone(), - state.assignee.clone(), - state.claimed_at.map(|dt| dt.to_rfc3339()), - state.completed_at.map(|dt| dt.to_rfc3339()), - state.duration_secs.map(|d| d as i64), - state.blocked_reason.clone(), - state.baseline_rev.clone(), - state.final_rev.clone(), - state.retry_count as i64, - ], - ) - .await?; - Ok(()) - } - - /// Get runtime state for a task. - pub async fn get(&self, task_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT task_id, assignee, claimed_at, completed_at, duration_secs, blocked_reason, baseline_rev, final_rev, retry_count - FROM runtime_state WHERE task_id = ?1", - params![task_id.to_string()], - ) - .await?; - - let Some(row) = rows.next().await? 
else { - return Ok(None); - }; - - let claimed_s: Option = row.get::>(2)?; - let completed_s: Option = row.get::>(3)?; - let duration: Option = row.get::>(4)?; - let retry: i64 = row.get::(8)?; - - Ok(Some(RuntimeState { - task_id: row.get::(0)?, - assignee: row.get::>(1)?, - claimed_at: claimed_s.as_deref().map(parse_datetime), - completed_at: completed_s.as_deref().map(parse_datetime), - duration_secs: duration.map(|d| d as u64), - blocked_reason: row.get::>(5)?, - baseline_rev: row.get::>(6)?, - final_rev: row.get::>(7)?, - retry_count: retry as u32, - })) - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/scout_cache.rs b/flowctl/crates/flowctl-db/src/repo/scout_cache.rs deleted file mode 100644 index 657eeaeb..00000000 --- a/flowctl/crates/flowctl-db/src/repo/scout_cache.rs +++ /dev/null @@ -1,224 +0,0 @@ -//! Async repository for scout result caching. -//! -//! Caches scout results keyed by `{scout_type}:{commit_hash}` with a -//! configurable TTL (default 24h). Auto-evicts expired entries on `set`. - -use chrono::{Duration, Utc}; -use libsql::{params, Connection}; - -use crate::error::DbError; - -/// Default cache TTL: 24 hours. -const CACHE_TTL_HOURS: i64 = 24; - -/// Reduced TTL for git-less fallback: 1 hour. -const NO_GIT_TTL_HOURS: i64 = 1; - -/// Async repository for scout result caching. -pub struct ScoutCacheRepo { - conn: Connection, -} - -impl ScoutCacheRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Get a cached scout result. Returns `None` if miss (not found or expired). - /// - /// TTL: 24h for normal keys, 1h for `*:no-git` keys. 
- pub async fn get(&self, key: &str) -> Result, DbError> { - let ttl_hours = if key.ends_with(":no-git") { - NO_GIT_TTL_HOURS - } else { - CACHE_TTL_HOURS - }; - - let cutoff = (Utc::now() - Duration::hours(ttl_hours)).format("%Y-%m-%d %H:%M:%S").to_string(); - - let mut rows = self - .conn - .query( - "SELECT result FROM scout_cache WHERE key = ?1 AND created_at >= ?2", - params![key.to_string(), cutoff], - ) - .await?; - - if let Some(row) = rows.next().await? { - let result: String = row.get(0)?; - Ok(Some(result)) - } else { - Ok(None) - } - } - - /// Set a cached scout result. Auto-evicts expired entries first, then upserts. - pub async fn set( - &self, - key: &str, - commit_hash: &str, - scout_type: &str, - result: &str, - ) -> Result<(), DbError> { - // Evict expired entries (older than 24h). - let cutoff = (Utc::now() - Duration::hours(CACHE_TTL_HOURS)).format("%Y-%m-%d %H:%M:%S").to_string(); - self.conn - .execute( - "DELETE FROM scout_cache WHERE created_at < ?1", - params![cutoff], - ) - .await?; - - // Upsert the new entry. - self.conn - .execute( - "INSERT INTO scout_cache (key, commit_hash, scout_type, result, created_at) - VALUES (?1, ?2, ?3, ?4, datetime('now')) - ON CONFLICT(key) DO UPDATE SET - commit_hash = excluded.commit_hash, - result = excluded.result, - created_at = excluded.created_at", - params![ - key.to_string(), - commit_hash.to_string(), - scout_type.to_string(), - result.to_string(), - ], - ) - .await?; - - Ok(()) - } - - /// Clear all cached scout results. 
- pub async fn clear(&self) -> Result { - let n = self.conn.execute("DELETE FROM scout_cache", ()).await?; - Ok(n) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::pool::open_memory_async; - - #[tokio::test] - async fn test_cache_hit() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = ScoutCacheRepo::new(conn); - - repo.set("repo-scout:abc123", "abc123", "repo-scout", r#"{"findings":[]}"#) - .await - .unwrap(); - - let result = repo.get("repo-scout:abc123").await.unwrap(); - assert!(result.is_some()); - assert_eq!(result.unwrap(), r#"{"findings":[]}"#); - } - - #[tokio::test] - async fn test_cache_miss_commit() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = ScoutCacheRepo::new(conn); - - repo.set("repo-scout:abc123", "abc123", "repo-scout", r#"{"findings":[]}"#) - .await - .unwrap(); - - // Different commit hash → different key → miss. - let result = repo.get("repo-scout:def456").await.unwrap(); - assert!(result.is_none()); - } - - #[tokio::test] - async fn test_cache_miss_ttl() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = ScoutCacheRepo::new(conn.clone()); - - // Insert with a past timestamp to simulate expiry. - let past = (Utc::now() - Duration::hours(25)).format("%Y-%m-%d %H:%M:%S").to_string(); - conn.execute( - "INSERT INTO scout_cache (key, commit_hash, scout_type, result, created_at) - VALUES ('repo-scout:old', 'old', 'repo-scout', '{}', ?1)", - params![past], - ) - .await - .unwrap(); - - let result = repo.get("repo-scout:old").await.unwrap(); - assert!(result.is_none()); - } - - #[tokio::test] - async fn test_auto_eviction() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = ScoutCacheRepo::new(conn.clone()); - - // Insert an expired entry. 
- let past = (Utc::now() - Duration::hours(25)).format("%Y-%m-%d %H:%M:%S").to_string(); - conn.execute( - "INSERT INTO scout_cache (key, commit_hash, scout_type, result, created_at) - VALUES ('old-scout:expired', 'expired', 'old-scout', '{}', ?1)", - params![past], - ) - .await - .unwrap(); - - // Set a new entry — should evict the expired one. - repo.set("repo-scout:new", "new", "repo-scout", r#"{"data":"fresh"}"#) - .await - .unwrap(); - - // Verify expired entry is gone. - let mut rows = conn - .query("SELECT key FROM scout_cache WHERE key = 'old-scout:expired'", ()) - .await - .unwrap(); - assert!(rows.next().await.unwrap().is_none()); - - // Verify new entry exists. - let result = repo.get("repo-scout:new").await.unwrap(); - assert!(result.is_some()); - } - - #[tokio::test] - async fn test_upsert() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = ScoutCacheRepo::new(conn.clone()); - - repo.set("repo-scout:abc", "abc", "repo-scout", "v1") - .await - .unwrap(); - - repo.set("repo-scout:abc", "abc", "repo-scout", "v2") - .await - .unwrap(); - - let result = repo.get("repo-scout:abc").await.unwrap(); - assert_eq!(result.unwrap(), "v2"); - - // Verify only one row exists. 
- let mut rows = conn - .query("SELECT COUNT(*) FROM scout_cache WHERE key = 'repo-scout:abc'", ()) - .await - .unwrap(); - let row = rows.next().await.unwrap().unwrap(); - let count: i64 = row.get(0).unwrap(); - assert_eq!(count, 1); - } - - #[tokio::test] - async fn test_clear() { - let (_db, conn) = open_memory_async().await.unwrap(); - let repo = ScoutCacheRepo::new(conn); - - repo.set("a:1", "1", "a", "data1").await.unwrap(); - repo.set("b:2", "2", "b", "data2").await.unwrap(); - - let n = repo.clear().await.unwrap(); - assert_eq!(n, 2); - - let result = repo.get("a:1").await.unwrap(); - assert!(result.is_none()); - } -} diff --git a/flowctl/crates/flowctl-db/src/repo/task.rs b/flowctl/crates/flowctl-db/src/repo/task.rs deleted file mode 100644 index 93bed2e0..00000000 --- a/flowctl/crates/flowctl-db/src/repo/task.rs +++ /dev/null @@ -1,312 +0,0 @@ -//! Async repository for task CRUD operations. - -use chrono::Utc; -use libsql::{params, Connection}; - -use flowctl_core::state_machine::Status; -use flowctl_core::types::Task; - -use crate::error::DbError; - -use super::helpers::{parse_datetime, parse_domain, parse_status}; - -/// Async repository for task CRUD operations. -pub struct TaskRepo { - conn: Connection, -} - -impl TaskRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Insert or replace a task (empty body preserves existing body). - pub async fn upsert(&self, task: &Task) -> Result<(), DbError> { - self.upsert_with_body(task, "").await - } - - /// Insert or replace a task with its markdown body. 
- pub async fn upsert_with_body(&self, task: &Task, body: &str) -> Result<(), DbError> { - self.conn - .execute( - "INSERT INTO tasks (id, epic_id, title, status, priority, domain, file_path, body, created_at, updated_at) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) - ON CONFLICT(id) DO UPDATE SET - title = excluded.title, - status = excluded.status, - priority = excluded.priority, - domain = excluded.domain, - file_path = excluded.file_path, - body = CASE WHEN excluded.body = '' THEN tasks.body ELSE excluded.body END, - updated_at = excluded.updated_at", - params![ - task.id.clone(), - task.epic.clone(), - task.title.clone(), - task.status.to_string(), - task.sort_priority() as i64, - task.domain.to_string(), - task.file_path.clone().unwrap_or_default(), - body.to_string(), - task.created_at.to_rfc3339(), - task.updated_at.to_rfc3339(), - ], - ) - .await?; - - // Upsert dependencies. - self.conn - .execute( - "DELETE FROM task_deps WHERE task_id = ?1", - params![task.id.clone()], - ) - .await?; - for dep in &task.depends_on { - self.conn - .execute( - "INSERT INTO task_deps (task_id, depends_on) VALUES (?1, ?2)", - params![task.id.clone(), dep.clone()], - ) - .await?; - } - - // Upsert file ownership. - self.conn - .execute( - "DELETE FROM file_ownership WHERE task_id = ?1", - params![task.id.clone()], - ) - .await?; - for file in &task.files { - self.conn - .execute( - "INSERT INTO file_ownership (file_path, task_id) VALUES (?1, ?2)", - params![file.clone(), task.id.clone()], - ) - .await?; - } - - Ok(()) - } - - /// Get a task by ID. - pub async fn get(&self, id: &str) -> Result { - self.get_with_body(id).await.map(|(task, _)| task) - } - - /// Get a task by ID, returning (Task, body). 
- pub async fn get_with_body(&self, id: &str) -> Result<(Task, String), DbError> { - let mut rows = self - .conn - .query( - "SELECT id, epic_id, title, status, priority, domain, file_path, created_at, updated_at, COALESCE(body, '') - FROM tasks WHERE id = ?1", - params![id.to_string()], - ) - .await?; - - let row = rows - .next() - .await? - .ok_or_else(|| DbError::NotFound(format!("task: {id}")))?; - - let status_s: String = row.get(3)?; - let domain_s: String = row.get(5)?; - let created_s: String = row.get(7)?; - let updated_s: String = row.get(8)?; - let priority_val: i64 = row.get(4)?; - let priority = if priority_val == 999 { - None - } else { - Some(priority_val as u32) - }; - - let task = Task { - schema_version: 1, - id: row.get::(0)?, - epic: row.get::(1)?, - title: row.get::(2)?, - status: parse_status(&status_s), - priority, - domain: parse_domain(&domain_s), - depends_on: Vec::new(), - files: Vec::new(), - r#impl: None, - review: None, - sync: None, - file_path: row.get::>(6)?, - created_at: parse_datetime(&created_s), - updated_at: parse_datetime(&updated_s), - }; - let body: String = row.get::(9)?; - - let deps = self.get_deps(&task.id).await?; - let files = self.get_files(&task.id).await?; - Ok(( - Task { - depends_on: deps, - files, - ..task - }, - body, - )) - } - - /// List tasks for an epic. - pub async fn list_by_epic(&self, epic_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT id FROM tasks WHERE epic_id = ?1 ORDER BY priority, id", - params![epic_id.to_string()], - ) - .await?; - - let mut ids: Vec = Vec::new(); - while let Some(row) = rows.next().await? { - ids.push(row.get::(0)?); - } - - let mut out = Vec::with_capacity(ids.len()); - for id in &ids { - out.push(self.get(id).await?); - } - Ok(out) - } - - /// List all tasks, optionally filtered by status and/or domain. 
- pub async fn list_all( - &self, - status: Option<&str>, - domain: Option<&str>, - ) -> Result, DbError> { - let mut rows = match (status, domain) { - (Some(s), Some(d)) => { - self.conn - .query( - "SELECT id FROM tasks WHERE status = ?1 AND domain = ?2 ORDER BY epic_id, priority, id", - params![s.to_string(), d.to_string()], - ) - .await? - } - (Some(s), None) => { - self.conn - .query( - "SELECT id FROM tasks WHERE status = ?1 ORDER BY epic_id, priority, id", - params![s.to_string()], - ) - .await? - } - (None, Some(d)) => { - self.conn - .query( - "SELECT id FROM tasks WHERE domain = ?1 ORDER BY epic_id, priority, id", - params![d.to_string()], - ) - .await? - } - (None, None) => { - self.conn - .query("SELECT id FROM tasks ORDER BY epic_id, priority, id", ()) - .await? - } - }; - - let mut ids: Vec = Vec::new(); - while let Some(row) = rows.next().await? { - ids.push(row.get::(0)?); - } - - let mut out = Vec::with_capacity(ids.len()); - for id in &ids { - out.push(self.get(id).await?); - } - Ok(out) - } - - /// List tasks filtered by status. - pub async fn list_by_status(&self, status: Status) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT id FROM tasks WHERE status = ?1 ORDER BY priority, id", - params![status.to_string()], - ) - .await?; - let mut ids: Vec = Vec::new(); - while let Some(row) = rows.next().await? { - ids.push(row.get::(0)?); - } - - let mut out = Vec::with_capacity(ids.len()); - for id in &ids { - out.push(self.get(id).await?); - } - Ok(out) - } - - /// Update task status. - pub async fn update_status(&self, id: &str, status: Status) -> Result<(), DbError> { - let rows = self - .conn - .execute( - "UPDATE tasks SET status = ?1, updated_at = ?2 WHERE id = ?3", - params![status.to_string(), Utc::now().to_rfc3339(), id.to_string()], - ) - .await?; - if rows == 0 { - return Err(DbError::NotFound(format!("task: {id}"))); - } - Ok(()) - } - - /// Delete a task and all related data. 
- pub async fn delete(&self, id: &str) -> Result<(), DbError> { - self.conn - .execute( - "DELETE FROM task_deps WHERE task_id = ?1", - params![id.to_string()], - ) - .await?; - self.conn - .execute( - "DELETE FROM file_ownership WHERE task_id = ?1", - params![id.to_string()], - ) - .await?; - self.conn - .execute("DELETE FROM tasks WHERE id = ?1", params![id.to_string()]) - .await?; - Ok(()) - } - - async fn get_deps(&self, task_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT depends_on FROM task_deps WHERE task_id = ?1", - params![task_id.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row.get::(0)?); - } - Ok(out) - } - - async fn get_files(&self, task_id: &str) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT file_path FROM file_ownership WHERE task_id = ?1", - params![task_id.to_string()], - ) - .await?; - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - out.push(row.get::(0)?); - } - Ok(out) - } -} diff --git a/flowctl/crates/flowctl-db/src/schema.sql b/flowctl/crates/flowctl-db/src/schema.sql deleted file mode 100644 index 9d74e988..00000000 --- a/flowctl/crates/flowctl-db/src/schema.sql +++ /dev/null @@ -1,313 +0,0 @@ --- flowctl libSQL schema (fresh, no migrations). --- Consolidates migrations 01-04 plus adds native vector column on memory. --- Applied once on DB open via pool::apply_schema(). 
- --- ── Indexed from Markdown (rebuildable via reindex) ───────────────── - -CREATE TABLE IF NOT EXISTS epics ( - id TEXT PRIMARY KEY, - title TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'open', - branch_name TEXT, - plan_review TEXT DEFAULT 'unknown', - auto_execute_pending INTEGER DEFAULT 0, - auto_execute_set_at TEXT, - archived INTEGER DEFAULT 0, - file_path TEXT NOT NULL, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - body TEXT NOT NULL DEFAULT '' -); - -CREATE TABLE IF NOT EXISTS tasks ( - id TEXT PRIMARY KEY, - epic_id TEXT NOT NULL REFERENCES epics(id), - title TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'todo', - priority INTEGER DEFAULT 999, - domain TEXT DEFAULT 'general', - file_path TEXT NOT NULL, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - body TEXT NOT NULL DEFAULT '' -); - -CREATE TABLE IF NOT EXISTS task_deps ( - task_id TEXT NOT NULL, - depends_on TEXT NOT NULL, - PRIMARY KEY (task_id, depends_on) -); - -CREATE TABLE IF NOT EXISTS epic_deps ( - epic_id TEXT NOT NULL, - depends_on TEXT NOT NULL, - PRIMARY KEY (epic_id, depends_on) -); - --- Reverse dependency index: O(1) lookup of "what depends on task X" -CREATE TABLE IF NOT EXISTS task_reverse_deps ( - depends_on TEXT NOT NULL, - task_id TEXT NOT NULL, - PRIMARY KEY (depends_on, task_id) -); - -CREATE INDEX IF NOT EXISTS idx_task_reverse_deps_task ON task_reverse_deps(task_id); - --- Auto-maintain reverse index from task_deps INSERT/DELETE -CREATE TRIGGER IF NOT EXISTS trg_task_deps_insert AFTER INSERT ON task_deps -BEGIN - INSERT OR IGNORE INTO task_reverse_deps (depends_on, task_id) - VALUES (NEW.depends_on, NEW.task_id); -END; - -CREATE TRIGGER IF NOT EXISTS trg_task_deps_delete AFTER DELETE ON task_deps -BEGIN - DELETE FROM task_reverse_deps - WHERE depends_on = OLD.depends_on AND task_id = OLD.task_id; -END; - -CREATE TABLE IF NOT EXISTS file_ownership ( - file_path TEXT NOT NULL, - task_id TEXT NOT NULL, - PRIMARY KEY (file_path, task_id) -); - --- ── Gaps 
registry (replaces epics/{id}.gaps.json sidecar) ───────────── - -CREATE TABLE IF NOT EXISTS gaps ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - epic_id TEXT NOT NULL, - capability TEXT NOT NULL, - priority TEXT NOT NULL DEFAULT 'important', - source TEXT, - status TEXT NOT NULL DEFAULT 'open', - resolved_at TEXT, - evidence TEXT, - task_id TEXT, - created_at TEXT NOT NULL DEFAULT (datetime('now')), - FOREIGN KEY (epic_id) REFERENCES epics(id) -); - --- ── Runtime-only (not in Markdown, not rebuildable) ───────────────── - -CREATE TABLE IF NOT EXISTS runtime_state ( - task_id TEXT PRIMARY KEY, - assignee TEXT, - claimed_at TEXT, - completed_at TEXT, - duration_secs INTEGER, - blocked_reason TEXT, - baseline_rev TEXT, - final_rev TEXT, - retry_count INTEGER NOT NULL DEFAULT 0 -); - -CREATE TABLE IF NOT EXISTS file_locks ( - file_path TEXT NOT NULL, - task_id TEXT NOT NULL, - locked_at TEXT NOT NULL, - holder_pid INTEGER, - expires_at TEXT, - lock_mode TEXT NOT NULL DEFAULT 'write', - PRIMARY KEY (file_path, task_id) -); - -CREATE TABLE IF NOT EXISTS heartbeats ( - task_id TEXT PRIMARY KEY, - last_beat TEXT NOT NULL, - worker_pid INTEGER -); - -CREATE TABLE IF NOT EXISTS phase_progress ( - task_id TEXT NOT NULL, - phase TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'pending', - completed_at TEXT, - PRIMARY KEY (task_id, phase) -); - -CREATE TABLE IF NOT EXISTS evidence ( - task_id TEXT PRIMARY KEY, - commits TEXT, - tests TEXT, - files_changed INTEGER, - insertions INTEGER, - deletions INTEGER, - review_iters INTEGER -); - --- ── Event log + metrics (append-only) ─────────────────────────────── - -CREATE TABLE IF NOT EXISTS events ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - timestamp TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')), - epic_id TEXT NOT NULL, - task_id TEXT, - event_type TEXT NOT NULL, - actor TEXT, - payload TEXT, - session_id TEXT -); - --- ── Event store (event-sourced pipeline) ────────────────────────────── - -CREATE TABLE IF NOT EXISTS 
event_store ( - event_id INTEGER PRIMARY KEY AUTOINCREMENT, - stream_id TEXT NOT NULL, - version INTEGER NOT NULL, - event_type TEXT NOT NULL, - payload TEXT NOT NULL, - metadata TEXT, - created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')) -); - -CREATE UNIQUE INDEX IF NOT EXISTS idx_event_store_stream_version - ON event_store(stream_id, version); - --- ── Pipeline progress ───────────────────────────────────────────────── - -CREATE TABLE IF NOT EXISTS pipeline_progress ( - epic_id TEXT PRIMARY KEY, - phase TEXT NOT NULL DEFAULT 'plan', - started_at TEXT, - updated_at TEXT -); - -CREATE TABLE IF NOT EXISTS token_usage ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - timestamp TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')), - epic_id TEXT NOT NULL, - task_id TEXT, - phase TEXT, - model TEXT, - input_tokens INTEGER, - output_tokens INTEGER, - cache_read INTEGER DEFAULT 0, - cache_write INTEGER DEFAULT 0, - estimated_cost REAL -); - -CREATE TABLE IF NOT EXISTS daily_rollup ( - day TEXT NOT NULL, - epic_id TEXT, - tasks_started INTEGER DEFAULT 0, - tasks_completed INTEGER DEFAULT 0, - tasks_failed INTEGER DEFAULT 0, - total_duration_s INTEGER DEFAULT 0, - input_tokens INTEGER DEFAULT 0, - output_tokens INTEGER DEFAULT 0, - PRIMARY KEY (day, epic_id) -); - -CREATE TABLE IF NOT EXISTS monthly_rollup ( - month TEXT PRIMARY KEY, - epics_completed INTEGER DEFAULT 0, - tasks_completed INTEGER DEFAULT 0, - avg_lead_time_h REAL DEFAULT 0, - total_tokens INTEGER DEFAULT 0, - total_cost_usd REAL DEFAULT 0 -); - --- ── Approvals (replaces stdin-blocking Teams protocol) ───────────── - -CREATE TABLE IF NOT EXISTS approvals ( - id TEXT PRIMARY KEY, - task_id TEXT NOT NULL, - kind TEXT NOT NULL, -- file_access | mutation | generic - payload TEXT NOT NULL, -- JSON - status TEXT NOT NULL DEFAULT 'pending', -- pending | approved | rejected - created_at INTEGER NOT NULL, - resolved_at INTEGER, - resolver TEXT, - reason TEXT -); - -CREATE INDEX IF NOT EXISTS 
idx_approvals_status ON approvals(status); -CREATE INDEX IF NOT EXISTS idx_approvals_task ON approvals(task_id); - --- ── Memory with native vector embedding (BGE-small, 384-dim) ──────── - -CREATE TABLE IF NOT EXISTS memory ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - entry_type TEXT NOT NULL, - content TEXT NOT NULL, - summary TEXT, - hash TEXT UNIQUE, - module TEXT, - severity TEXT, - problem_type TEXT, - component TEXT, - tags TEXT DEFAULT '[]', - track TEXT, - created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')), - last_verified TEXT, - refs INTEGER NOT NULL DEFAULT 0, - embedding BLOB -); - --- ── Skills with native vector embedding (BGE-small, 384-dim) ─────── - -CREATE TABLE IF NOT EXISTS skills ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE, - description TEXT NOT NULL, - plugin_path TEXT, - updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')), - embedding BLOB -); - --- Native libSQL vector index for semantic skill matching --- NOTE: Applied separately in pool.rs with graceful degradation. 
--- CREATE INDEX IF NOT EXISTS skills_emb_idx ON skills(libsql_vector_idx(embedding)); - --- ── Scout result cache ────────────────────────────────────────────── - -CREATE TABLE IF NOT EXISTS scout_cache ( - key TEXT PRIMARY KEY, - commit_hash TEXT NOT NULL, - scout_type TEXT NOT NULL, - result TEXT NOT NULL, - created_at TEXT NOT NULL DEFAULT (datetime('now')) -); - -CREATE INDEX IF NOT EXISTS idx_scout_cache_type ON scout_cache(scout_type); -CREATE INDEX IF NOT EXISTS idx_scout_cache_created ON scout_cache(created_at); - --- ── Indexes ───────────────────────────────────────────────────────── - -CREATE INDEX IF NOT EXISTS idx_gaps_epic ON gaps(epic_id); -CREATE INDEX IF NOT EXISTS idx_gaps_status ON gaps(status); -CREATE INDEX IF NOT EXISTS idx_tasks_epic ON tasks(epic_id); -CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status); -CREATE INDEX IF NOT EXISTS idx_events_entity ON events(epic_id, task_id); -CREATE INDEX IF NOT EXISTS idx_events_ts ON events(timestamp); -CREATE INDEX IF NOT EXISTS idx_events_type ON events(event_type, timestamp); -CREATE INDEX IF NOT EXISTS idx_token_epic ON token_usage(epic_id); -CREATE INDEX IF NOT EXISTS idx_memory_type ON memory(entry_type); -CREATE INDEX IF NOT EXISTS idx_memory_module ON memory(module); -CREATE INDEX IF NOT EXISTS idx_memory_track ON memory(track); -CREATE INDEX IF NOT EXISTS idx_memory_severity ON memory(severity); - --- Native libSQL vector index for semantic memory search --- NOTE: libsql_vector_idx requires libSQL server extensions (not available in core/embedded mode). --- Applied separately in pool.rs with graceful degradation. 
--- CREATE INDEX IF NOT EXISTS memory_emb_idx ON memory(libsql_vector_idx(embedding)); - --- ── Auto-aggregation trigger ──────────────────────────────────────── - -CREATE TRIGGER IF NOT EXISTS trg_daily_rollup AFTER INSERT ON events -WHEN NEW.event_type IN ('task_completed', 'task_failed', 'task_started') -BEGIN - INSERT INTO daily_rollup (day, epic_id, tasks_completed, tasks_failed, tasks_started) - VALUES (DATE(NEW.timestamp), NEW.epic_id, - CASE WHEN NEW.event_type = 'task_completed' THEN 1 ELSE 0 END, - CASE WHEN NEW.event_type = 'task_failed' THEN 1 ELSE 0 END, - CASE WHEN NEW.event_type = 'task_started' THEN 1 ELSE 0 END) - ON CONFLICT(day, epic_id) DO UPDATE SET - tasks_completed = tasks_completed + - CASE WHEN NEW.event_type = 'task_completed' THEN 1 ELSE 0 END, - tasks_failed = tasks_failed + - CASE WHEN NEW.event_type = 'task_failed' THEN 1 ELSE 0 END, - tasks_started = tasks_started + - CASE WHEN NEW.event_type = 'task_started' THEN 1 ELSE 0 END; -END; diff --git a/flowctl/crates/flowctl-db/src/skill.rs b/flowctl/crates/flowctl-db/src/skill.rs deleted file mode 100644 index d2855fb9..00000000 --- a/flowctl/crates/flowctl-db/src/skill.rs +++ /dev/null @@ -1,273 +0,0 @@ -//! Skill repository with native libSQL vector search. -//! -//! Stores skill metadata (name, description, plugin path) with a 384-dim -//! BGE-small embedding for semantic matching via `vector_top_k`. -//! -//! Reuses `embed_one()`, `ensure_embedder()`, and `vec_to_literal()` from -//! the memory module -- zero duplication. - -use libsql::{params, Connection}; - -use crate::error::DbError; -use crate::memory::{embed_one, vec_to_literal}; - -// ── Types ─────────────────────────────────────────────────────────── - -/// A skill match result from semantic search. -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct SkillMatch { - pub name: String, - pub description: String, - pub score: f64, -} - -/// A registered skill entry. 
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct SkillEntry { - pub id: i64, - pub name: String, - pub description: String, - pub plugin_path: Option, - pub updated_at: String, -} - -// ── Repository ────────────────────────────────────────────────────── - -/// Async repository for skill metadata + semantic vector search. -pub struct SkillRepo { - conn: Connection, -} - -impl SkillRepo { - pub fn new(conn: Connection) -> Self { - Self { conn } - } - - /// Insert or replace a skill. Auto-generates an embedding from - /// `description` when the embedder is available; otherwise leaves the - /// embedding NULL and logs a warning. - pub async fn upsert( - &self, - name: &str, - description: &str, - plugin_path: Option<&str>, - ) -> Result<(), DbError> { - let now = chrono::Utc::now().to_rfc3339(); - - self.conn - .execute( - "INSERT INTO skills (name, description, plugin_path, updated_at) - VALUES (?1, ?2, ?3, ?4) - ON CONFLICT(name) DO UPDATE SET - description = excluded.description, - plugin_path = excluded.plugin_path, - updated_at = excluded.updated_at", - params![ - name.to_string(), - description.to_string(), - plugin_path.map(String::from), - now, - ], - ) - .await?; - - // Attempt to embed; swallow failures (NULL embedding is fine). - match embed_one(description).await { - Ok(vec) => { - let lit = vec_to_literal(&vec); - self.conn - .execute( - "UPDATE skills SET embedding = vector32(?1) WHERE name = ?2", - params![lit, name.to_string()], - ) - .await?; - } - Err(e) => { - tracing::warn!( - skill = name, - error = %e, - "embedder unavailable; skill inserted without embedding" - ); - } - } - - Ok(()) - } - - /// Semantic search: embed the query, find nearest skills via - /// `vector_top_k`, convert L2 distance to cosine similarity, and - /// filter by threshold. - /// - /// Returns `Ok(vec![])` (not an error) if the embedder or vector - /// index is unavailable -- graceful degradation. 
- pub async fn match_skills( - &self, - query: &str, - limit: usize, - threshold: f64, - ) -> Result, DbError> { - let vec = match embed_one(query).await { - Ok(v) => v, - Err(_) => return Ok(vec![]), - }; - let lit = vec_to_literal(&vec); - - // Use vector_distance_cos() instead of vector_top_k() — works without - // a vector index (no ANN index required in embedded mode). Exact search - // via full table scan; perfectly fast for <10,000 rows (~30 skills). - let rows_result = self - .conn - .query( - "SELECT s.name, s.description, - vector_distance_cos(s.embedding, vector32(?1)) AS distance - FROM skills s - WHERE s.embedding IS NOT NULL - ORDER BY distance ASC - LIMIT ?2", - params![lit, limit as i64], - ) - .await; - - let mut rows = match rows_result { - Ok(r) => r, - Err(_) => return Ok(vec![]), // vector functions unavailable - }; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? { - let dist: f64 = row.get(2)?; - // Cosine distance → cosine similarity: sim = 1 - dist - // (0 = identical, 1 = orthogonal, 2 = opposite) - let score = 1.0 - dist; - if score >= threshold { - out.push(SkillMatch { - name: row.get::(0)?, - description: row.get::(1)?, - score, - }); - } - } - Ok(out) - } - - /// List all registered skills (for debugging / introspection). - pub async fn list(&self) -> Result, DbError> { - let mut rows = self - .conn - .query( - "SELECT id, name, description, plugin_path, updated_at - FROM skills - ORDER BY name ASC", - (), - ) - .await?; - - let mut out = Vec::new(); - while let Some(row) = rows.next().await? 
{ - out.push(SkillEntry { - id: row.get::(0)?, - name: row.get::(1)?, - description: row.get::(2)?, - plugin_path: row.get::>(3)?, - updated_at: row.get::(4)?, - }); - } - Ok(out) - } -} - -// ── Tests ─────────────────────────────────────────────────────────── - -#[cfg(test)] -mod tests { - use super::*; - use crate::pool::open_memory_async; - - async fn fresh_repo() -> SkillRepo { - let (_db, conn) = open_memory_async().await.expect("open memory db"); - let _ = Box::leak(Box::new(_db)); - SkillRepo::new(conn) - } - - #[tokio::test] - async fn test_upsert_and_list() { - let repo = fresh_repo().await; - repo.upsert("plan", "Plan and design tasks", Some("/plugins/flow")) - .await - .expect("upsert"); - repo.upsert("work", "Execute implementation tasks", None) - .await - .expect("upsert"); - - let skills = repo.list().await.expect("list"); - assert_eq!(skills.len(), 2); - assert_eq!(skills[0].name, "plan"); - assert_eq!(skills[1].name, "work"); - } - - #[tokio::test] - async fn test_upsert_replaces() { - let repo = fresh_repo().await; - repo.upsert("plan", "old description", None) - .await - .expect("upsert"); - repo.upsert("plan", "new description", Some("/new/path")) - .await - .expect("upsert"); - - let skills = repo.list().await.expect("list"); - assert_eq!(skills.len(), 1); - assert_eq!(skills[0].description, "new description"); - assert_eq!(skills[0].plugin_path.as_deref(), Some("/new/path")); - } - - #[tokio::test] - async fn test_match_skills_graceful_no_index() { - // In-memory DB won't have vector index; should return empty, not error. - let repo = fresh_repo().await; - repo.upsert("plan", "Plan tasks", None) - .await - .expect("upsert"); - - let matches = repo - .match_skills("planning", 5, 0.5) - .await - .expect("match_skills should not error"); - // May be empty if embedder or index is unavailable -- that's fine. - assert!(matches.len() <= 5); - } - - /// Semantic match end-to-end using vector_distance_cos (no index needed). 
- /// Gated behind `#[ignore]` because the first run downloads the - /// BGE-small model (~130MB). - #[tokio::test] - #[ignore = "requires fastembed model (~130MB); run with --ignored"] - async fn test_match_skills_semantic() { - let repo = fresh_repo().await; - repo.upsert("plan", "Design and architect implementation plans", None) - .await - .expect("upsert"); - repo.upsert("work", "Execute coding tasks and write code", None) - .await - .expect("upsert"); - repo.upsert("review", "Review code changes for quality", None) - .await - .expect("upsert"); - - let matches = repo - .match_skills("architecture design", 3, 0.3) - .await - .expect("match_skills"); - assert!(!matches.is_empty(), "expected at least one match"); - // "plan" (Design and architect...) should be the best match - assert_eq!( - matches[0].name, "plan", - "expected 'plan' as best match for architecture query, got '{}'", - matches[0].name - ); - // Scores should be between 0 and 1 - for m in &matches { - assert!(m.score > 0.0 && m.score <= 1.0, "score out of range: {}", m.score); - } - } -} diff --git a/flowctl/crates/flowctl-db/src/store.rs b/flowctl/crates/flowctl-db/src/store.rs new file mode 100644 index 00000000..3b683197 --- /dev/null +++ b/flowctl/crates/flowctl-db/src/store.rs @@ -0,0 +1,104 @@ +//! FlowStore — the main entry point for file-based storage. +//! +//! Wraps a `.flow/` directory path and provides access to sub-stores +//! for epics, tasks, events, pipeline, phases, locks, memory, approvals, and gaps. + +use std::path::{Path, PathBuf}; + +use crate::approvals::ApprovalStore; +use crate::events::EventStore; +use crate::gaps::GapStore; +use crate::locks::LockStore; +use crate::memory::MemoryStore; +use crate::phases::PhaseStore; +use crate::pipeline::PipelineStore; + +/// Top-level store backed by a `.flow/` directory. +pub struct FlowStore { + flow_dir: PathBuf, +} + +impl FlowStore { + /// Create a new store rooted at the given `.flow/` directory. 
+ pub fn new(flow_dir: PathBuf) -> Self { + Self { flow_dir } + } + + /// Ensure all required subdirectories exist. + pub fn ensure_dirs(&self) -> Result<(), crate::error::DbError> { + flowctl_core::json_store::ensure_dirs(&self.flow_dir)?; + Ok(()) + } + + /// Return the flow directory path. + pub fn flow_dir(&self) -> &Path { + &self.flow_dir + } + + /// Access the event store. + pub fn events(&self) -> EventStore<'_> { + EventStore::new(&self.flow_dir) + } + + /// Access the pipeline store. + pub fn pipeline(&self) -> PipelineStore<'_> { + PipelineStore::new(&self.flow_dir) + } + + /// Access the phase store. + pub fn phases(&self) -> PhaseStore<'_> { + PhaseStore::new(&self.flow_dir) + } + + /// Access the lock store. + pub fn locks(&self) -> LockStore<'_> { + LockStore::new(&self.flow_dir) + } + + /// Access the memory store. + pub fn memory(&self) -> MemoryStore<'_> { + MemoryStore::new(&self.flow_dir) + } + + /// Access the approval store. + pub fn approvals(&self) -> ApprovalStore<'_> { + ApprovalStore::new(&self.flow_dir) + } + + /// Access the gap store. + pub fn gaps(&self) -> GapStore<'_> { + GapStore::new(&self.flow_dir) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn store_ensure_dirs() { + let tmp = TempDir::new().unwrap(); + let store = FlowStore::new(tmp.path().to_path_buf()); + store.ensure_dirs().unwrap(); + assert!(tmp.path().join("epics").exists()); + assert!(tmp.path().join("tasks").exists()); + assert!(tmp.path().join("specs").exists()); + assert!(tmp.path().join(".state").exists()); + assert!(tmp.path().join("memory").exists()); + } + + #[test] + fn store_accessors_return_sub_stores() { + let tmp = TempDir::new().unwrap(); + let store = FlowStore::new(tmp.path().to_path_buf()); + // Just verify the accessors compile and return the right types. 
+ let _ = store.events(); + let _ = store.pipeline(); + let _ = store.phases(); + let _ = store.locks(); + let _ = store.memory(); + let _ = store.approvals(); + let _ = store.gaps(); + } +} diff --git a/flowctl/crates/flowctl-service/Cargo.toml b/flowctl/crates/flowctl-service/Cargo.toml index 1ddc70e2..b5652d83 100644 --- a/flowctl/crates/flowctl-service/Cargo.toml +++ b/flowctl/crates/flowctl-service/Cargo.toml @@ -9,18 +9,14 @@ license.workspace = true [dependencies] flowctl-core = { workspace = true } flowctl-db = { workspace = true } -libsql = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } chrono = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } -tokio = { workspace = true } -async-trait = "0.1" [lints] workspace = true [dev-dependencies] tempfile = "3" -tokio = { workspace = true } diff --git a/flowctl/crates/flowctl-service/src/approvals.rs b/flowctl/crates/flowctl-service/src/approvals.rs index 993bbab2..7db239aa 100644 --- a/flowctl/crates/flowctl-service/src/approvals.rs +++ b/flowctl/crates/flowctl-service/src/approvals.rs @@ -1,150 +1,60 @@ -//! Approval store: CRUD over the `approvals` libSQL table. +//! Approval store: file-based CRUD over `.state/approvals.json`. //! //! Wraps `flowctl_core::approvals::Approval` protocol types with persistence. //! Used by the CLI and MCP to keep approval state consistent. use chrono::Utc; -use libsql::{params, Connection}; use flowctl_core::approvals::{ - Approval, ApprovalKind, ApprovalStatus, CreateApprovalRequest, + Approval, ApprovalStatus, CreateApprovalRequest, }; -use crate::error::{ServiceError, ServiceResult}; +use flowctl_db::FlowStore; -/// Trait for approval persistence. Wire-level implementation sits on libSQL; -/// integration tests may supply a fake. 
-#[async_trait::async_trait] -pub trait ApprovalStore: Send + Sync { - async fn create(&self, req: CreateApprovalRequest) -> ServiceResult; - async fn list(&self, status_filter: Option) -> ServiceResult>; - async fn get(&self, id: &str) -> ServiceResult; - async fn approve(&self, id: &str, resolver: Option) -> ServiceResult; - async fn reject( - &self, - id: &str, - resolver: Option, - reason: Option, - ) -> ServiceResult; -} +use crate::error::{ServiceError, ServiceResult}; -/// libSQL-backed approval store. -#[derive(Clone)] -pub struct LibSqlApprovalStore { - conn: Connection, +/// File-backed approval store. +pub struct FileApprovalStore { + store: FlowStore, } -impl LibSqlApprovalStore { - pub fn new(conn: Connection) -> Self { - Self { conn } +impl FileApprovalStore { + pub fn new(store: FlowStore) -> Self { + Self { store } } fn new_id() -> String { - // Simple monotonic-ish identifier. Uses epoch millis + 4-byte random - // suffix (time-based) — collision risk within the same ms is negligible - // for single-daemon usage and keeps the crate dep-free. let now = Utc::now(); let millis = now.timestamp_millis(); let nanos = now.timestamp_subsec_nanos(); format!("apv-{millis:x}-{nanos:x}") } - async fn load_row(&self, id: &str) -> ServiceResult { - let mut rows = self - .conn - .query( - "SELECT id, task_id, kind, payload, status, created_at, resolved_at, resolver, reason - FROM approvals WHERE id = ?1", - params![id.to_string()], - ) - .await - .map_err(|e| ServiceError::ValidationError(format!("query failed: {e}")))?; - - let row = rows - .next() - .await - .map_err(|e| ServiceError::ValidationError(format!("row read failed: {e}")))? 
- .ok_or_else(|| ServiceError::TaskNotFound(format!("approval not found: {id}")))?; - - row_to_approval(row) + fn load_all(&self) -> ServiceResult> { + let raw = self.store.approvals().read() + .map_err(ServiceError::DbError)?; + let mut out = Vec::new(); + for val in raw { + if let Ok(a) = serde_json::from_value::(val) { + out.push(a); + } + } + Ok(out) } -} - -fn row_to_approval(row: libsql::Row) -> ServiceResult { - let id: String = row - .get(0) - .map_err(|e| ServiceError::ValidationError(format!("id: {e}")))?; - let task_id: String = row - .get(1) - .map_err(|e| ServiceError::ValidationError(format!("task_id: {e}")))?; - let kind_str: String = row - .get(2) - .map_err(|e| ServiceError::ValidationError(format!("kind: {e}")))?; - let payload_str: String = row - .get(3) - .map_err(|e| ServiceError::ValidationError(format!("payload: {e}")))?; - let status_str: String = row - .get(4) - .map_err(|e| ServiceError::ValidationError(format!("status: {e}")))?; - let created_at: i64 = row - .get(5) - .map_err(|e| ServiceError::ValidationError(format!("created_at: {e}")))?; - let resolved_at: Option = row - .get(6) - .map_err(|e| ServiceError::ValidationError(format!("resolved_at: {e}")))?; - let resolver: Option = row - .get(7) - .map_err(|e| ServiceError::ValidationError(format!("resolver: {e}")))?; - let reason: Option = row - .get(8) - .map_err(|e| ServiceError::ValidationError(format!("reason: {e}")))?; - let kind = ApprovalKind::parse(&kind_str) - .ok_or_else(|| ServiceError::ValidationError(format!("unknown kind: {kind_str}")))?; - let status = ApprovalStatus::parse(&status_str) - .ok_or_else(|| ServiceError::ValidationError(format!("unknown status: {status_str}")))?; - let payload: serde_json::Value = serde_json::from_str(&payload_str) - .map_err(|e| ServiceError::ValidationError(format!("payload JSON: {e}")))?; - - Ok(Approval { - id, - task_id, - kind, - payload, - status, - created_at, - resolved_at, - resolver, - reason, - }) -} + fn save_all(&self, approvals: 
&[Approval]) -> ServiceResult<()> { + let vals: Vec = approvals + .iter() + .filter_map(|a| serde_json::to_value(a).ok()) + .collect(); + self.store.approvals().write(&vals) + .map_err(ServiceError::DbError)?; + Ok(()) + } -#[async_trait::async_trait] -impl ApprovalStore for LibSqlApprovalStore { - async fn create(&self, req: CreateApprovalRequest) -> ServiceResult { - // Reject orphan approvals: the referenced task must exist. Without - // this check a typo creates a ghost pending record with no way to - // reconcile it to real work. - let exists: i64 = { - let mut rows = self - .conn - .query( - "SELECT COUNT(*) FROM tasks WHERE id = ?1", - params![req.task_id.clone()], - ) - .await - .map_err(|e| ServiceError::ValidationError(format!("task lookup: {e}")))?; - let row = rows - .next() - .await - .map_err(|e| ServiceError::ValidationError(format!("task lookup row: {e}")))? - .ok_or_else(|| { - ServiceError::ValidationError("task lookup returned no rows".into()) - })?; - row.get(0) - .map_err(|e| ServiceError::ValidationError(format!("task lookup value: {e}")))? 
- }; - if exists == 0 { + pub fn create(&self, req: CreateApprovalRequest) -> ServiceResult { + // Validate task exists + if flowctl_core::json_store::task_read(self.store.flow_dir(), &req.task_id).is_err() { return Err(ServiceError::ValidationError(format!( "task {} does not exist", req.task_id @@ -153,277 +63,91 @@ impl ApprovalStore for LibSqlApprovalStore { let id = Self::new_id(); let now = Utc::now().timestamp(); - let payload_str = serde_json::to_string(&req.payload) - .map_err(|e| ServiceError::ValidationError(format!("payload encode: {e}")))?; - self.conn - .execute( - "INSERT INTO approvals (id, task_id, kind, payload, status, created_at) - VALUES (?1, ?2, ?3, ?4, 'pending', ?5)", - params![ - id.clone(), - req.task_id.clone(), - req.kind.as_str().to_string(), - payload_str, - now, - ], - ) - .await - .map_err(|e| ServiceError::ValidationError(format!("insert failed: {e}")))?; + let approval = Approval { + id: id.clone(), + task_id: req.task_id, + kind: req.kind, + payload: req.payload, + status: ApprovalStatus::Pending, + created_at: now, + resolved_at: None, + resolver: None, + reason: None, + }; + + let mut all = self.load_all()?; + all.push(approval.clone()); + self.save_all(&all)?; - self.load_row(&id).await + Ok(approval) } - async fn list(&self, status_filter: Option) -> ServiceResult> { - let mut sql = String::from( - "SELECT id, task_id, kind, payload, status, created_at, resolved_at, resolver, reason - FROM approvals", - ); - let mut rows = if let Some(s) = status_filter { - sql.push_str(" WHERE status = ?1 ORDER BY created_at DESC"); - self.conn - .query(&sql, params![s.as_str().to_string()]) - .await - .map_err(|e| ServiceError::ValidationError(format!("query failed: {e}")))? 
+ pub fn list(&self, status_filter: Option) -> ServiceResult> { + let all = self.load_all()?; + let mut filtered: Vec = if let Some(s) = status_filter { + all.into_iter().filter(|a| a.status == s).collect() } else { - sql.push_str(" ORDER BY created_at DESC"); - self.conn - .query(&sql, ()) - .await - .map_err(|e| ServiceError::ValidationError(format!("query failed: {e}")))? + all }; - - let mut out = Vec::new(); - while let Some(row) = rows - .next() - .await - .map_err(|e| ServiceError::ValidationError(format!("row read: {e}")))? - { - out.push(row_to_approval(row)?); - } - Ok(out) + filtered.sort_by(|a, b| b.created_at.cmp(&a.created_at)); + Ok(filtered) } - async fn get(&self, id: &str) -> ServiceResult { - self.load_row(id).await + pub fn get(&self, id: &str) -> ServiceResult { + let all = self.load_all()?; + all.into_iter() + .find(|a| a.id == id) + .ok_or_else(|| ServiceError::TaskNotFound(format!("approval not found: {id}"))) } - async fn approve(&self, id: &str, resolver: Option) -> ServiceResult { - // Pre-check for better error messages; authoritative guard is the UPDATE below. 
- let existing = self.load_row(id).await?; - if existing.status != ApprovalStatus::Pending { + pub fn approve(&self, id: &str, resolver: Option) -> ServiceResult { + let mut all = self.load_all()?; + let approval = all.iter_mut() + .find(|a| a.id == id) + .ok_or_else(|| ServiceError::TaskNotFound(format!("approval not found: {id}")))?; + + if approval.status != ApprovalStatus::Pending { return Err(ServiceError::InvalidTransition(format!( "approval {id} is already {:?}", - existing.status + approval.status ))); } - let now = Utc::now().timestamp(); - let affected = self - .conn - .execute( - "UPDATE approvals SET status = 'approved', resolved_at = ?1, resolver = ?2 - WHERE id = ?3 AND status = 'pending'", - params![now, resolver.clone(), id.to_string()], - ) - .await - .map_err(|e| ServiceError::ValidationError(format!("update failed: {e}")))?; - if affected == 0 { - // Lost a race with another resolver — the row is no longer pending. - return Err(ServiceError::InvalidTransition(format!( - "approval {id} was resolved concurrently" - ))); - } - self.load_row(id).await + + approval.status = ApprovalStatus::Approved; + approval.resolved_at = Some(Utc::now().timestamp()); + approval.resolver = resolver; + let result = approval.clone(); + + self.save_all(&all)?; + Ok(result) } - async fn reject( + pub fn reject( &self, id: &str, resolver: Option, reason: Option, ) -> ServiceResult { - // Pre-check for better error messages; authoritative guard is the UPDATE below. 
- let existing = self.load_row(id).await?; - if existing.status != ApprovalStatus::Pending { + let mut all = self.load_all()?; + let approval = all.iter_mut() + .find(|a| a.id == id) + .ok_or_else(|| ServiceError::TaskNotFound(format!("approval not found: {id}")))?; + + if approval.status != ApprovalStatus::Pending { return Err(ServiceError::InvalidTransition(format!( "approval {id} is already {:?}", - existing.status + approval.status ))); } - let now = Utc::now().timestamp(); - let affected = self - .conn - .execute( - "UPDATE approvals SET status = 'rejected', resolved_at = ?1, resolver = ?2, reason = ?3 - WHERE id = ?4 AND status = 'pending'", - params![now, resolver.clone(), reason.clone(), id.to_string()], - ) - .await - .map_err(|e| ServiceError::ValidationError(format!("update failed: {e}")))?; - if affected == 0 { - // Lost a race with another resolver — the row is no longer pending. - return Err(ServiceError::InvalidTransition(format!( - "approval {id} was resolved concurrently" - ))); - } - self.load_row(id).await - } -} - -#[cfg(test)] -mod tests { - use super::*; - - async fn in_mem_store() -> LibSqlApprovalStore { - let (db, conn) = flowctl_db::open_memory_async().await.unwrap(); - // Seed tasks referenced by the tests so the existence check passes. - let now = "2026-01-01T00:00:00Z"; - for tid in &["fn-1.1", "fn-1.2"] { - let (epic_id, _num) = tid.split_once('.').unwrap(); - conn.execute( - "INSERT OR IGNORE INTO epics - (id, title, status, file_path, created_at, updated_at, body) - VALUES (?1, ?1, 'open', ?1, ?2, ?2, '')", - params![epic_id.to_string(), now.to_string()], - ) - .await - .expect("seed epic"); - conn.execute( - "INSERT OR IGNORE INTO tasks - (id, epic_id, title, status, file_path, created_at, updated_at, body) - VALUES (?1, ?2, ?1, 'todo', ?1, ?3, ?3, '')", - params![tid.to_string(), epic_id.to_string(), now.to_string()], - ) - .await - .expect("seed task"); - } - // Leak the db so conn stays valid for the test lifetime. 
- Box::leak(Box::new(db)); - LibSqlApprovalStore::new(conn) - } - #[tokio::test] - async fn create_rejects_nonexistent_task() { - let store = in_mem_store().await; - let err = store - .create(CreateApprovalRequest { - task_id: "fn-999.99".into(), - kind: ApprovalKind::FileAccess, - payload: serde_json::json!({}), - }) - .await - .expect_err("should reject nonexistent task"); - assert!(matches!(err, ServiceError::ValidationError(_))); - } - - #[tokio::test] - async fn create_and_get() { - let store = in_mem_store().await; - let created = store - .create(CreateApprovalRequest { - task_id: "fn-1.1".into(), - kind: ApprovalKind::FileAccess, - payload: serde_json::json!({"files": ["a.rs"]}), - }) - .await - .unwrap(); - assert_eq!(created.status, ApprovalStatus::Pending); - assert_eq!(created.task_id, "fn-1.1"); - - let fetched = store.get(&created.id).await.unwrap(); - assert_eq!(fetched.id, created.id); - } - - #[tokio::test] - async fn list_with_filter() { - let store = in_mem_store().await; - let a = store - .create(CreateApprovalRequest { - task_id: "fn-1.1".into(), - kind: ApprovalKind::Generic, - payload: serde_json::json!({}), - }) - .await - .unwrap(); - let b = store - .create(CreateApprovalRequest { - task_id: "fn-1.2".into(), - kind: ApprovalKind::Mutation, - payload: serde_json::json!({"op": "split"}), - }) - .await - .unwrap(); - store.approve(&b.id, Some("alice".into())).await.unwrap(); - - let pending = store - .list(Some(ApprovalStatus::Pending)) - .await - .unwrap(); - assert_eq!(pending.len(), 1); - assert_eq!(pending[0].id, a.id); - - let approved = store - .list(Some(ApprovalStatus::Approved)) - .await - .unwrap(); - assert_eq!(approved.len(), 1); - assert_eq!(approved[0].id, b.id); - - let all = store.list(None).await.unwrap(); - assert_eq!(all.len(), 2); - } - - #[tokio::test] - async fn approve_transitions_status() { - let store = in_mem_store().await; - let created = store - .create(CreateApprovalRequest { - task_id: "fn-1.1".into(), - kind: 
ApprovalKind::FileAccess, - payload: serde_json::json!({}), - }) - .await - .unwrap(); - let resolved = store - .approve(&created.id, Some("bob".into())) - .await - .unwrap(); - assert_eq!(resolved.status, ApprovalStatus::Approved); - assert!(resolved.resolved_at.is_some()); - assert_eq!(resolved.resolver.as_deref(), Some("bob")); - - // Double-approve should fail. - let err = store.approve(&created.id, None).await.unwrap_err(); - matches!(err, ServiceError::InvalidTransition(_)); - } - - #[tokio::test] - async fn reject_records_reason() { - let store = in_mem_store().await; - let created = store - .create(CreateApprovalRequest { - task_id: "fn-1.1".into(), - kind: ApprovalKind::Mutation, - payload: serde_json::json!({}), - }) - .await - .unwrap(); - let resolved = store - .reject( - &created.id, - Some("carol".into()), - Some("not safe".into()), - ) - .await - .unwrap(); - assert_eq!(resolved.status, ApprovalStatus::Rejected); - assert_eq!(resolved.reason.as_deref(), Some("not safe")); - } + approval.status = ApprovalStatus::Rejected; + approval.resolved_at = Some(Utc::now().timestamp()); + approval.resolver = resolver; + approval.reason = reason; + let result = approval.clone(); - #[tokio::test] - async fn get_missing_returns_not_found() { - let store = in_mem_store().await; - let err = store.get("apv-missing").await.unwrap_err(); - matches!(err, ServiceError::TaskNotFound(_)); + self.save_all(&all)?; + Ok(result) } } diff --git a/flowctl/crates/flowctl-service/src/changes.rs b/flowctl/crates/flowctl-service/src/changes.rs index a92bfef9..28320ce9 100644 --- a/flowctl/crates/flowctl-service/src/changes.rs +++ b/flowctl/crates/flowctl-service/src/changes.rs @@ -1,17 +1,14 @@ -//! Applies a `Changes` batch against JSON files and the libSQL event log. +//! Applies a `Changes` batch against JSON files and the JSONL event log. //! //! `ChangesApplier` is the single execution point for declarative mutations. //! 
It iterates each `Mutation` in order, writes to the `.flow/` JSON store, -//! and auto-logs an event to the `events` table for auditability. +//! and auto-logs an event to the JSONL log for auditability. use std::path::Path; use flowctl_core::changes::{Changes, Mutation}; -use flowctl_core::events::{ - EpicEvent, EventMetadata, FlowEvent, TaskEvent, epic_stream_id, task_stream_id, -}; use flowctl_core::json_store; -use flowctl_db::{EventRepo, EventStoreRepo}; +use flowctl_db::FlowStore; use crate::error::{ServiceError, ServiceResult}; @@ -25,36 +22,24 @@ fn store_err(e: json_store::StoreError) -> ServiceError { pub struct ApplyResult { /// Number of mutations successfully applied. pub applied: usize, - /// Event IDs for each logged event (one per mutation). - pub event_ids: Vec, } /// Executes a `Changes` batch against JSON file storage and the event log. pub struct ChangesApplier<'a> { flow_dir: &'a Path, - event_repo: &'a EventRepo, - event_store: Option, actor: Option<&'a str>, session_id: Option<&'a str>, } impl<'a> ChangesApplier<'a> { - pub fn new(flow_dir: &'a Path, event_repo: &'a EventRepo) -> Self { + pub fn new(flow_dir: &'a Path) -> Self { Self { flow_dir, - event_repo, - event_store: None, actor: None, session_id: None, } } - /// Set an event store for domain event emission alongside audit logging. - pub fn with_event_store(mut self, store: EventStoreRepo) -> Self { - self.event_store = Some(store); - self - } - /// Set the actor (who is applying the changes) for event logging. pub fn with_actor(mut self, actor: &'a str) -> Self { self.actor = Some(actor); @@ -68,22 +53,16 @@ impl<'a> ChangesApplier<'a> { } /// Apply all mutations in order. Stops on first error. 
- pub async fn apply(&self, changes: &Changes) -> ServiceResult { + pub fn apply(&self, changes: &Changes) -> ServiceResult { let mut applied = 0; - let mut event_ids = Vec::with_capacity(changes.len()); for mutation in &changes.mutations { - // Emit domain event to event store (best-effort, before mutation) - self.emit_domain_event(mutation).await; - self.apply_one(mutation)?; - - let event_id = self.log_event(mutation).await?; - event_ids.push(event_id); + self.log_event(mutation); applied += 1; } - Ok(ApplyResult { applied, event_ids }) + Ok(ApplyResult { applied }) } /// Apply a single mutation to the JSON file store. @@ -132,12 +111,12 @@ impl<'a> ChangesApplier<'a> { Ok(()) } - /// Log a mutation to the events table. - async fn log_event(&self, mutation: &Mutation) -> ServiceResult { + /// Log a mutation to the JSONL event log. Best-effort: failures are ignored. + fn log_event(&self, mutation: &Mutation) { + let store = FlowStore::new(self.flow_dir.to_path_buf()); let event_type = mutation.event_type(); let entity_id = mutation.entity_id(); - // Derive epic_id and task_id for the event row. let epic_id = mutation .epic_id() .unwrap_or(entity_id); @@ -153,52 +132,17 @@ impl<'a> ChangesApplier<'a> { _ => None, }; - // Payload: JSON of the entity ID for traceability. - let payload = serde_json::json!({ "entity_id": entity_id }).to_string(); - - let row_id = self - .event_repo - .insert( - epic_id, - task_id, - event_type, - self.actor, - Some(&payload), - self.session_id, - ) - .await - .map_err(ServiceError::DbError)?; - - Ok(row_id) - } - - /// Emit a domain event to the event store for create mutations. - /// Best-effort: failures are silently ignored so they don't block the pipeline. 
- async fn emit_domain_event(&self, mutation: &Mutation) { - let store = match self.event_store { - Some(ref s) => s, - None => return, - }; - - let (stream, flow_event) = match mutation { - Mutation::CreateEpic { epic } => ( - epic_stream_id(&epic.id), - FlowEvent::Epic(EpicEvent::Created), - ), - Mutation::CreateTask { task } => ( - task_stream_id(&task.id), - FlowEvent::Task(TaskEvent::Created), - ), - _ => return, - }; - - let metadata = EventMetadata { - actor: self.actor.unwrap_or("system").into(), - source_cmd: "changes_applier".into(), - session_id: self.session_id.unwrap_or("").into(), - timestamp: Some(chrono::Utc::now().to_rfc3339()), - }; - - let _ = store.append(&stream, &flow_event, &metadata).await.ok(); + let event = serde_json::json!({ + "stream_id": format!("mutation:{epic_id}"), + "type": event_type, + "entity_id": entity_id, + "epic_id": epic_id, + "task_id": task_id, + "actor": self.actor.unwrap_or("system"), + "session_id": self.session_id.unwrap_or(""), + "timestamp": chrono::Utc::now().to_rfc3339(), + }); + + let _ = store.events().append(&event.to_string()); } } diff --git a/flowctl/crates/flowctl-service/src/connection.rs b/flowctl/crates/flowctl-service/src/connection.rs deleted file mode 100644 index b9e042e6..00000000 --- a/flowctl/crates/flowctl-service/src/connection.rs +++ /dev/null @@ -1,72 +0,0 @@ -//! Connection management for the service layer (async libSQL). -//! -//! `libsql::Connection` is `Send + Sync` and cheap to `Clone`. Callers pass -//! it by value or reference. No mutex wrapping is needed. - -use std::path::{Path, PathBuf}; - -use libsql::Connection; - -use crate::error::{ServiceError, ServiceResult}; - -/// File-backed connection provider using a working directory. -/// -/// Wraps `flowctl_db::open_async()` so callers can re-open as needed. 
-#[derive(Debug, Clone)] -pub struct FileConnectionProvider { - working_dir: PathBuf, -} - -impl FileConnectionProvider { - /// Create a provider rooted at the given working directory. - pub fn new(working_dir: impl Into) -> Self { - Self { - working_dir: working_dir.into(), - } - } - - /// Return the working directory this provider is rooted at. - pub fn working_dir(&self) -> &Path { - &self.working_dir - } - - /// Open a new libSQL connection asynchronously. - pub async fn connect(&self) -> ServiceResult { - let db = flowctl_db::open_async(&self.working_dir) - .await - .map_err(ServiceError::from)?; - db.connect().map_err(|e| { - ServiceError::DbError(flowctl_db::DbError::LibSql(e)) - }) - } -} - -/// Open a connection asynchronously (convenience wrapper around -/// `flowctl_db::open_async`). -pub async fn open_async(working_dir: &Path) -> ServiceResult { - let db = flowctl_db::open_async(working_dir) - .await - .map_err(ServiceError::from)?; - db.connect() - .map_err(|e| ServiceError::DbError(flowctl_db::DbError::LibSql(e))) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn file_provider_roundtrip() { - let tmp = tempfile::tempdir().unwrap(); - let provider = FileConnectionProvider::new(tmp.path()); - let conn = provider.connect().await; - assert!(conn.is_ok(), "should open file-backed connection: {:?}", conn.err()); - } - - #[tokio::test] - async fn open_async_works() { - let tmp = tempfile::tempdir().unwrap(); - let conn = open_async(tmp.path()).await; - assert!(conn.is_ok(), "open_async should succeed: {:?}", conn.err()); - } -} diff --git a/flowctl/crates/flowctl-service/src/lib.rs b/flowctl/crates/flowctl-service/src/lib.rs index d4be0828..895d33d0 100644 --- a/flowctl/crates/flowctl-service/src/lib.rs +++ b/flowctl/crates/flowctl-service/src/lib.rs @@ -8,23 +8,18 @@ //! //! ```text //! CLI commands ─┐ -//! MCP server ───┴─► flowctl-service ──► flowctl-db ──► SQLite +//! 
MCP server ───┴─► flowctl-service ──► flowctl-db ──► JSON files //! │ //! flowctl-core (types, DAG, state machine) //! ``` //! -//! # Connection management -//! -//! `libsql::Connection` is `Send + Sync` and cheap to `Clone`. All service -//! functions are async and accept the connection by reference. +//! All operations are synchronous, using file-based storage. pub mod approvals; pub mod changes; -pub mod connection; pub mod error; pub mod lifecycle; pub mod outputs; // Re-export key types at crate root. -pub use connection::{open_async, FileConnectionProvider}; pub use error::{ServiceError, ServiceResult}; diff --git a/flowctl/crates/flowctl-service/src/lifecycle.rs b/flowctl/crates/flowctl-service/src/lifecycle.rs index 45de9f94..ee3ccde2 100644 --- a/flowctl/crates/flowctl-service/src/lifecycle.rs +++ b/flowctl/crates/flowctl-service/src/lifecycle.rs @@ -2,13 +2,12 @@ //! //! These functions contain the business logic extracted from the CLI //! lifecycle commands. Each accepts a request struct and returns -//! `ServiceResult`, using SQLite as the sole source of truth. +//! `ServiceResult`, using JSON file store as the sole source of truth. use std::fs; use std::path::Path; use chrono::Utc; -use libsql::Connection; use flowctl_core::id::{epic_id_from_task, is_task_id}; use flowctl_core::state_machine::{Status, Transition}; @@ -16,8 +15,7 @@ use flowctl_core::types::{ Epic, EpicStatus, Evidence, RuntimeState, Task, REVIEWS_DIR, }; -use flowctl_core::events::{EventMetadata, FlowEvent, TaskEvent, task_stream_id}; -use flowctl_db::EventStoreRepo; +use flowctl_db::FlowStore; use crate::error::{ServiceError, ServiceResult}; @@ -113,16 +111,15 @@ fn validate_task_id(id: &str) -> ServiceResult<()> { } /// Load a task from JSON files. 
-async fn load_task(_conn: Option<&Connection>, flow_dir: &Path, task_id: &str) -> Option { +fn load_task(flow_dir: &Path, task_id: &str) -> Option { flowctl_core::json_store::task_read(flow_dir, task_id).ok() } -async fn load_epic(_conn: Option<&Connection>, flow_dir: &Path, epic_id: &str) -> Option { +fn load_epic(flow_dir: &Path, epic_id: &str) -> Option { flowctl_core::json_store::epic_read(flow_dir, epic_id).ok() } -async fn get_runtime(_conn: Option<&Connection>, flow_dir: &Path, task_id: &str) -> Option { - // Read from JSON state and convert to RuntimeState for compatibility +fn get_runtime(flow_dir: &Path, task_id: &str) -> Option { let state = flowctl_core::json_store::state_read(flow_dir, task_id).ok()?; Some(RuntimeState { task_id: task_id.to_string(), @@ -138,8 +135,7 @@ async fn get_runtime(_conn: Option<&Connection>, flow_dir: &Path, task_id: &str) } /// Load all tasks for an epic from JSON files. -async fn load_tasks_for_epic( - _conn: Option<&Connection>, +fn load_tasks_for_epic( flow_dir: &Path, epic_id: &str, ) -> std::collections::HashMap { @@ -157,8 +153,7 @@ async fn load_tasks_for_epic( } /// Find all downstream dependents of a task within the same epic. -async fn find_dependents( - conn: Option<&Connection>, +fn find_dependents( flow_dir: &Path, task_id: &str, ) -> Vec { @@ -167,7 +162,7 @@ async fn find_dependents( Err(_) => return Vec::new(), }; - let tasks = load_tasks_for_epic(conn, flow_dir, &epic_id).await; + let tasks = load_tasks_for_epic(flow_dir, &epic_id); let mut dependents = Vec::new(); let mut visited = std::collections::HashSet::new(); let mut queue = vec![task_id.to_string()]; @@ -206,8 +201,7 @@ fn get_max_retries_from_config(config: Option<&serde_json::Value>) -> u32 { } /// Propagate upstream_failed to all transitive downstream tasks. 
-async fn propagate_upstream_failure( - conn: Option<&Connection>, +fn propagate_upstream_failure( flow_dir: &Path, failed_id: &str, ) -> Vec { @@ -216,7 +210,7 @@ async fn propagate_upstream_failure( Err(_) => return Vec::new(), }; - let tasks = load_tasks_for_epic(conn, flow_dir, &epic_id).await; + let tasks = load_tasks_for_epic(flow_dir, &epic_id); let task_list: Vec = tasks.values().cloned().collect(); let dag = match flowctl_core::TaskDag::from_tasks(&task_list) { @@ -242,7 +236,6 @@ async fn propagate_upstream_failure( continue; } - // Update JSON state if let Ok(mut state) = flowctl_core::json_store::state_read(flow_dir, tid) { state.status = Status::UpstreamFailed; state.updated_at = Utc::now(); @@ -266,8 +259,7 @@ async fn propagate_upstream_failure( } /// Handle task failure: check retries, set up_for_retry or failed + propagate. -async fn handle_task_failure( - conn: Option<&Connection>, +fn handle_task_failure( flow_dir: &Path, task_id: &str, runtime: &Option, @@ -295,8 +287,7 @@ async fn handle_task_failure( flowctl_core::json_store::state_write(flow_dir, task_id, &task_state) .map_err(|e| std::io::Error::other(format!("failed to write retry state for {task_id}: {e}")))?; - // Audit event - log_audit_event(conn, task_id, "task_failed").await; + log_audit_event(flow_dir, task_id, "task_failed"); Ok((Status::UpForRetry, Vec::new())) } else { @@ -307,10 +298,9 @@ async fn handle_task_failure( flowctl_core::json_store::state_write(flow_dir, task_id, &task_state) .map_err(|e| std::io::Error::other(format!("failed to write failed state for {task_id}: {e}")))?; - // Audit event - log_audit_event(conn, task_id, "task_failed").await; + log_audit_event(flow_dir, task_id, "task_failed"); - let affected = propagate_upstream_failure(conn, flow_dir, task_id).await; + let affected = propagate_upstream_failure(flow_dir, task_id); Ok((Status::Failed, affected)) } } @@ -426,62 +416,60 @@ fn archive_review_receipt( // ── Audit event helper 
─────────────────────────────────────────── -/// Log an audit event. Failures are silently ignored (audit must not block). -async fn log_audit_event( - conn: Option<&Connection>, +/// Log an audit event to the JSONL event log. Failures are silently ignored. +fn log_audit_event( + flow_dir: &Path, task_id: &str, event_type: &str, ) { - if let Some(c) = conn { - let epic_id = epic_id_from_task(task_id).unwrap_or_default(); - let repo = flowctl_db::EventRepo::new(c.clone()); - let _ = repo - .insert(&epic_id, Some(task_id), event_type, None, None, None) - .await - .ok(); - } + let epic_id = epic_id_from_task(task_id).unwrap_or_default(); + let store = FlowStore::new(flow_dir.to_path_buf()); + let event = serde_json::json!({ + "stream_id": format!("task:{task_id}"), + "type": event_type, + "epic_id": epic_id, + "task_id": task_id, + "timestamp": chrono::Utc::now().to_rfc3339(), + }); + let _ = store.events().append(&event.to_string()); } -/// Emit a task event to the event store. Failures are silently ignored -/// (event emission must not block the lifecycle operation). -async fn emit_task_event( - conn: Option<&Connection>, +/// Emit a task event to the event store. Failures are silently ignored. 
+fn emit_task_event( + flow_dir: &Path, task_id: &str, - event: TaskEvent, + event_type: &str, source_cmd: &str, ) { - if let Some(c) = conn { - let repo = EventStoreRepo::new(c.clone()); - let stream = task_stream_id(task_id); - let flow_event = FlowEvent::Task(event); - let metadata = EventMetadata { - actor: "lifecycle".into(), - source_cmd: source_cmd.into(), - session_id: String::new(), - timestamp: Some(chrono::Utc::now().to_rfc3339()), - }; - let _ = repo.append(&stream, &flow_event, &metadata).await.ok(); - } + let store = FlowStore::new(flow_dir.to_path_buf()); + let stream_id = format!("task:{task_id}"); + let event = serde_json::json!({ + "stream_id": stream_id, + "type": event_type, + "source_cmd": source_cmd, + "actor": "lifecycle", + "timestamp": chrono::Utc::now().to_rfc3339(), + }); + let _ = store.events().append(&event.to_string()); } // ── Service functions ────────────────────────────────────────────── -/// Start a task: validate deps, state machine, actor, update DB + Markdown. -pub async fn start_task( - conn: Option<&Connection>, +/// Start a task: validate deps, state machine, actor, update state. 
+pub fn start_task( flow_dir: &Path, req: StartTaskRequest, ) -> ServiceResult { validate_task_id(&req.task_id)?; - let task = load_task(conn, flow_dir, &req.task_id).await.ok_or_else(|| { + let task = load_task(flow_dir, &req.task_id).ok_or_else(|| { ServiceError::TaskNotFound(req.task_id.clone()) })?; // Validate dependencies unless --force if !req.force { for dep in &task.depends_on { - let dep_task = load_task(conn, flow_dir, dep).await.ok_or_else(|| { + let dep_task = load_task(flow_dir, dep).ok_or_else(|| { ServiceError::DependencyUnsatisfied { task: req.task_id.clone(), dependency: format!("{} not found", dep), @@ -496,7 +484,7 @@ pub async fn start_task( } } - let existing_rt = get_runtime(conn, flow_dir, &req.task_id).await; + let existing_rt = get_runtime(flow_dir, &req.task_id); let existing_assignee = existing_rt.as_ref().and_then(|rt| rt.assignee.clone()); // Validate state machine transition (unless --force) @@ -579,15 +567,11 @@ pub async fn start_task( updated_at: Utc::now(), }; - // Write to JSON state file flowctl_core::json_store::state_write(flow_dir, &req.task_id, &task_state) .map_err(|e| ServiceError::IoError(std::io::Error::other(e.to_string())))?; - // Audit event - log_audit_event(conn, &req.task_id, "task_started").await; - - // Event store - emit_task_event(conn, &req.task_id, TaskEvent::Started, "flowctl start").await; + log_audit_event(flow_dir, &req.task_id, "task_started"); + emit_task_event(flow_dir, &req.task_id, "started", "flowctl start"); Ok(StartTaskResponse { task_id: req.task_id, @@ -595,19 +579,18 @@ pub async fn start_task( }) } -/// Complete a task: validate status/actor, collect evidence, update DB + Markdown. -pub async fn done_task( - conn: Option<&Connection>, +/// Complete a task: validate status/actor, collect evidence, update state. 
+pub fn done_task( flow_dir: &Path, req: DoneTaskRequest, ) -> ServiceResult { validate_task_id(&req.task_id)?; - let task = load_task(conn, flow_dir, &req.task_id).await.ok_or_else(|| { + let task = load_task(flow_dir, &req.task_id).ok_or_else(|| { ServiceError::TaskNotFound(req.task_id.clone()) })?; - let runtime = get_runtime(conn, flow_dir, &req.task_id).await; + let runtime = get_runtime(flow_dir, &req.task_id); // 1. Validate status + actor validate_done_request(&task, &runtime, &req)?; @@ -673,10 +656,8 @@ pub async fn done_task( archive_review_receipt(flow_dir, &req.task_id, &evidence_obj); // 8. Audit event - log_audit_event(conn, &req.task_id, "task_completed").await; - - // Event store - emit_task_event(conn, &req.task_id, TaskEvent::Completed, "flowctl done").await; + log_audit_event(flow_dir, &req.task_id, "task_completed"); + emit_task_event(flow_dir, &req.task_id, "completed", "flowctl done"); Ok(DoneTaskResponse { task_id: req.task_id, @@ -714,15 +695,14 @@ fn validate_workspace_changes(evidence_obj: &serde_json::Value) -> Option, +/// Block a task: validate status, read reason, update state. +pub fn block_task( flow_dir: &Path, req: BlockTaskRequest, ) -> ServiceResult { validate_task_id(&req.task_id)?; - let task = load_task(conn, flow_dir, &req.task_id).await.ok_or_else(|| { + let task = load_task(flow_dir, &req.task_id).ok_or_else(|| { ServiceError::TaskNotFound(req.task_id.clone()) })?; @@ -761,8 +741,7 @@ pub async fn block_task( .map_err(|e| ServiceError::IoError(std::io::Error::other(e.to_string())))?; } - // Event store - emit_task_event(conn, &req.task_id, TaskEvent::Blocked, "flowctl block").await; + emit_task_event(flow_dir, &req.task_id, "blocked", "flowctl block"); Ok(BlockTaskResponse { task_id: req.task_id, @@ -770,15 +749,14 @@ pub async fn block_task( }) } -/// Fail a task: check retries, propagate upstream failure, update DB + Markdown. 
-pub async fn fail_task( - conn: Option<&Connection>, +/// Fail a task: check retries, propagate upstream failure, update state. +pub fn fail_task( flow_dir: &Path, req: FailTaskRequest, ) -> ServiceResult { validate_task_id(&req.task_id)?; - let task = load_task(conn, flow_dir, &req.task_id).await.ok_or_else(|| { + let task = load_task(flow_dir, &req.task_id).ok_or_else(|| { ServiceError::TaskNotFound(req.task_id.clone()) })?; @@ -789,16 +767,15 @@ pub async fn fail_task( ))); } - let runtime = get_runtime(conn, flow_dir, &req.task_id).await; + let runtime = get_runtime(flow_dir, &req.task_id); let reason_text = req.reason.unwrap_or_else(|| "Task failed".to_string()); let config = load_config(flow_dir); let (final_status, upstream_failed_ids) = - handle_task_failure(conn, flow_dir, &req.task_id, &runtime, config.as_ref()).await + handle_task_failure(flow_dir, &req.task_id, &runtime, config.as_ref()) .map_err(ServiceError::IoError)?; - // Event store - emit_task_event(conn, &req.task_id, TaskEvent::Failed, "flowctl fail").await; + emit_task_event(flow_dir, &req.task_id, "failed", "flowctl fail"); let max_retries = get_max_retries_from_config(config.as_ref()); let retry_count = if final_status == Status::UpForRetry { @@ -822,20 +799,19 @@ pub async fn fail_task( } /// Restart a task and cascade to all downstream dependents. 
-pub async fn restart_task( - conn: Option<&Connection>, +pub fn restart_task( flow_dir: &Path, req: RestartTaskRequest, ) -> ServiceResult { validate_task_id(&req.task_id)?; - let _task = load_task(conn, flow_dir, &req.task_id).await.ok_or_else(|| { + let _task = load_task(flow_dir, &req.task_id).ok_or_else(|| { ServiceError::TaskNotFound(req.task_id.clone()) })?; // Check epic not closed if let Ok(epic_id) = epic_id_from_task(&req.task_id) { - if let Some(epic) = load_epic(conn, flow_dir, &epic_id).await { + if let Some(epic) = load_epic(flow_dir, &epic_id) { if epic.status == EpicStatus::Done { return Err(ServiceError::ValidationError(format!( "Cannot restart task in closed epic {}", @@ -846,7 +822,7 @@ pub async fn restart_task( } // Find all downstream dependents - let dependents = find_dependents(conn, flow_dir, &req.task_id).await; + let dependents = find_dependents(flow_dir, &req.task_id); // Check for in_progress tasks let mut in_progress_ids = Vec::new(); @@ -854,7 +830,7 @@ pub async fn restart_task( in_progress_ids.push(req.task_id.clone()); } for dep_id in &dependents { - if let Some(dep_task) = load_task(conn, flow_dir, dep_id).await { + if let Some(dep_task) = load_task(flow_dir, dep_id) { if dep_task.status == Status::InProgress { in_progress_ids.push(dep_id.clone()); } @@ -876,7 +852,7 @@ pub async fn restart_task( let mut skipped = Vec::new(); for tid in &all_ids { - let t = match load_task(conn, flow_dir, tid).await { + let t = match load_task(flow_dir, tid) { Some(t) => t, None => continue, }; @@ -904,7 +880,6 @@ pub async fn restart_task( // Execute reset let mut reset_ids = Vec::new(); for tid in &to_reset { - // Reset to blank JSON state let blank = flowctl_core::json_store::TaskState::default(); flowctl_core::json_store::state_write(flow_dir, tid, &blank) .map_err(|e| ServiceError::IoError(std::io::Error::other(e.to_string())))?; @@ -912,8 +887,7 @@ pub async fn restart_task( reset_ids.push(tid.clone()); } - // Event store — emit Started for 
the restarted task - emit_task_event(conn, &req.task_id, TaskEvent::Started, "flowctl restart").await; + emit_task_event(flow_dir, &req.task_id, "started", "flowctl restart"); Ok(RestartTaskResponse { cascade_from: req.task_id, @@ -965,8 +939,6 @@ mod tests { } } - // ── validate_done_request ────────────────────────────────────── - #[test] fn test_validate_done_request_in_progress_ok() { let task = make_task("fn-1.1", Status::InProgress); @@ -1020,8 +992,6 @@ mod tests { assert!(validate_done_request(&task, &None, &req).is_ok()); } - // ── parse_evidence ───────────────────────────────────────────── - #[test] fn test_parse_evidence_default() { let req = make_done_req("fn-1.1", "alice"); @@ -1052,8 +1022,6 @@ mod tests { assert!(parse_evidence(&req).is_err()); } - // ── compute_duration ─────────────────────────────────────────── - #[test] fn test_compute_duration_none() { assert!(compute_duration(&None).is_none()); @@ -1076,8 +1044,6 @@ mod tests { assert!(dur >= 119 && dur <= 121); } - // ── validate_workspace_changes ───────────────────────────────── - #[test] fn test_validate_workspace_changes_none() { let ev = serde_json::json!({"commits": []}); @@ -1115,8 +1081,6 @@ mod tests { assert!(validate_workspace_changes(&ev).is_none()); } - // ── archive_review_receipt ───────────────────────────────────── - #[test] fn test_archive_review_receipt_writes_file() { let tmp = tempfile::tempdir().unwrap(); @@ -1138,12 +1102,9 @@ mod tests { let tmp = tempfile::tempdir().unwrap(); let ev = serde_json::json!({"commits": []}); archive_review_receipt(tmp.path(), "fn-1.1", &ev); - // Should not create reviews dir assert!(!tmp.path().join(REVIEWS_DIR).exists()); } - // ── config helpers ───────────────────────────────────────────── - #[test] fn test_get_max_retries_from_config_none() { assert_eq!(get_max_retries_from_config(None), 0); diff --git a/flowctl/crates/flowctl-service/src/outputs.rs b/flowctl/crates/flowctl-service/src/outputs.rs index 550bfb79..80aa5e98 100644 --- 
a/flowctl/crates/flowctl-service/src/outputs.rs +++ b/flowctl/crates/flowctl-service/src/outputs.rs @@ -3,7 +3,7 @@ //! Lives at `.flow/outputs/.md`. Worker writes in Phase 9; the //! next worker reads the last N during Phase 2 re-anchor. //! -//! No libSQL table — outputs are narrative handoff artifacts, not verified +//! No database table — outputs are narrative handoff artifacts, not verified //! state. Listing is done by directory scan + epic-prefix filtering. use std::fs; diff --git a/flowctl/tests/cmd/next_json.toml b/flowctl/tests/cmd/next_json.toml index 8ae72634..329c49f6 100644 --- a/flowctl/tests/cmd/next_json.toml +++ b/flowctl/tests/cmd/next_json.toml @@ -1,5 +1,5 @@ bin.name = "flowctl" args = ["--json", "next"] stdout = """ -{"epic":"fn-2-event-sourced-pipeline-first","reason":"resume_in_progress","status":"work","task":"fn-2-event-sourced-pipeline-first.4"} +{"epic":"fn-17-remove-db-and-fastembed-pure-file-based","reason":"resume_in_progress","status":"work","task":"fn-17-remove-db-and-fastembed-pure-file-based.4"} """