diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index 9d567178d..a4ad24222 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -3,6 +3,9 @@ ## Project Overview Rust library for NP-hard problem reductions. Implements computational problems with reduction rules for transforming between equivalent formulations. +## Skills +When resolving an issue with a pull request, please use the [issue-to-pr](skills/issue-to-pr.md) skill to convert the issue into a pull request with a plan. + ## Commands ```bash make help # Show all available targets diff --git a/.claude/skills/issue-to-pr.md b/.claude/skills/issue-to-pr.md index bf50be6aa..350202779 100644 --- a/.claude/skills/issue-to-pr.md +++ b/.claude/skills/issue-to-pr.md @@ -7,12 +7,6 @@ description: Use when you have a GitHub issue and want to create a PR with an im Convert a GitHub issue into an actionable PR with a plan that auto-triggers Claude execution. -## Usage - -``` -/issue-to-pr -``` - ## Workflow ```dot @@ -24,7 +18,6 @@ digraph issue_to_pr { "Research references" [shape=box]; "Write plan file" [shape=box]; "Create branch and PR" [shape=box]; - "PR triggers [action]" [shape=doublecircle]; "Receive issue number" -> "Fetch issue with gh"; "Fetch issue with gh" -> "Check the rules to follow"; @@ -32,7 +25,6 @@ digraph issue_to_pr { "Verify completeness" -> "Research references"; "Research references" -> "Write plan file"; "Write plan file" -> "Create branch and PR"; - "Create branch and PR" -> "PR triggers [action]"; } ``` @@ -108,6 +100,7 @@ The plan MUST include an **action pipeline** section with concrete steps based o - Present example in tutorial style (see KColoring→QUBO section for reference) 5. **Regenerate graph** — `cargo run --example export_graph` +6. **Push and create PR** — Push the changes and create a pull request with a description of the changes. **Rules for solver implementation:** - Make sure at least one solver is provided in the issue template. Check if the solving strategy is valid. 
If not, reply under issue to ask for clarification. @@ -140,8 +133,10 @@ The plan MUST include an **action pipeline** section with concrete steps based o 3. **Document** — Update `docs/paper/reductions.typ`: - Add `display-name` entry - Add `#problem-def("Name")[definition...]` +4. **Push and create PR** — Push the changes and create a pull request with a description of the changes. ### 6. Create PR +Create a pull request with only the plan file. ```bash # Create branch @@ -156,8 +151,8 @@ git commit -m "Add plan for #: " # Push git push -u origin issue-<number>-<slug> -# Create PR with [action] at the BEGINNING -gh pr create --title "Fix #<number>: <title>" --body "[action] +# Create PR +gh pr create --title "Fix #<number>: <title>" --body " ## Summary <Brief description from brainstorming> @@ -165,7 +160,6 @@ gh pr create --title "Fix #<number>: <title>" --body "[action] Closes #<number>" ``` -**CRITICAL:** The PR body MUST start with `[action]` on the first line. This triggers automated plan execution. ## Example @@ -181,10 +175,9 @@ All required info is present. I'll create the plan... [Writes docs/plans/2026-02-09-independentset-to-qubo.md] [Creates branch, commits, pushes] -[Creates PR with body starting with "[action]"] +[Creates PR] -Created PR #45: Fix #42: Add IndependentSet → QUBO reduction -The [action] trigger will automatically execute the plan. +Created PR #45: Fix #42: Add IndependentSet → QUBO reduction, description: ... ``` ## Common Mistakes @@ -192,6 +185,5 @@ The [action] trigger will automatically execute the plan. 
| Mistake | Fix | |---------|-----| | Issue template incomplete | Ask contributor to fill in missing sections before proceeding | -| `[action]` not at start | PR body must BEGIN with `[action]` | | Including implementation code in initial PR | First PR: plan only | | Generic plan | Use specifics from the issue | diff --git a/.gitignore b/.gitignore index adcd0df21..678eb0061 100644 --- a/.gitignore +++ b/.gitignore @@ -77,3 +77,4 @@ docs/paper/examples/ # Claude Code logs claude-output.log +.worktrees/ \ No newline at end of file diff --git a/Makefile b/Makefile index 20dcef637..dd392f361 100644 --- a/Makefile +++ b/Makefile @@ -165,9 +165,9 @@ compare: rust-export INSTRUCTIONS ?= OUTPUT ?= claude-output.log AGENT_TYPE ?= claude +PLAN_FILE ?= $(shell ls -t docs/plans/*.md 2>/dev/null | head -1) run-plan: - PLAN_FILE ?= $(shell ls -t docs/plans/*.md 2>/dev/null | head -1) @NL=$$'\n'; \ BRANCH=$$(git branch --show-current); \ if [ "$(AGENT_TYPE)" = "claude" ]; then \ @@ -182,6 +182,7 @@ run-plan: PROMPT="$${PROMPT}$${NL}$${NL}## Process$${NL}$${PROCESS}$${NL}$${NL}## Rules$${NL}- Tests should be strong enough to catch regressions.$${NL}- Do not modify tests to make them pass.$${NL}- Test failure must be reported."; \ echo "=== Prompt ===" && echo "$$PROMPT" && echo "===" ; \ claude --dangerously-skip-permissions \ - --model claude-opus-4-6 \ + --model opus \ + --verbose \ --max-turns 500 \ -p "$$PROMPT" 2>&1 | tee "$(OUTPUT)" diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index eb718c9ef..1ff262445 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -806,6 +806,25 @@ The following reductions to Integer Linear Programming are straightforward formu _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. Constraints: $x_u + x_v <= 1$ for each $(u, v) in.not E$ (non-edges). Objective: maximize $sum_v x_v$. Equivalently, IS on the complement graph. _Solution extraction:_ $K = {v : x_v = 1}$. 
] +#reduction-rule("TravelingSalesman", "ILP", + example: true, + example-caption: [Weighted $K_4$: the optimal tour $0 arrow 1 arrow 3 arrow 2 arrow 0$ with cost 80 is found by position-based ILP.], +)[ + The traveling salesman problem reduces to binary ILP with $n^2 + 2 m n$ variables via position-based encoding with McCormick linearization. +][ + _Construction._ For graph $G = (V, E)$ with $n = |V|$ and $m = |E|$: + + _Variables:_ Binary $x_(v,k) in {0, 1}$ for each vertex $v in V$ and position $k in {0, ..., n-1}$. Interpretation: $x_(v,k) = 1$ iff vertex $v$ is at position $k$ in the tour. + + _Auxiliary variables:_ For each edge $(u,v) in E$ and position $k$, introduce $y_(u,v,k)$ and $y_(v,u,k)$ to linearize the products $x_(u,k) dot x_(v,(k+1) mod n)$ and $x_(v,k) dot x_(u,(k+1) mod n)$ respectively. + + _Constraints:_ (1) Each vertex has exactly one position: $sum_(k=0)^(n-1) x_(v,k) = 1$ for all $v in V$. (2) Each position has exactly one vertex: $sum_(v in V) x_(v,k) = 1$ for all $k$. (3) Non-edge consecutive prohibition: if ${v,w} in.not E$, then $x_(v,k) + x_(w,(k+1) mod n) <= 1$ for all $k$. (4) McCormick: $y <= x_(v,k)$, $y <= x_(w,(k+1) mod n)$, $y >= x_(v,k) + x_(w,(k+1) mod n) - 1$. + + _Objective:_ Minimize $sum_((u,v) in E) w(u,v) dot sum_k (y_(u,v,k) + y_(v,u,k))$. + + _Solution extraction._ For each position $k$, find vertex $v$ with $x_(v,k) = 1$ to recover the tour permutation; then select edges between consecutive positions. 
+] + == Unit Disk Mapping #reduction-rule("MaximumIndependentSet", "GridGraph")[ @@ -918,6 +937,7 @@ The following table shows concrete variable overhead for example instances, gene "kcoloring_to_ilp", "factoring_to_ilp", "maximumsetpacking_to_ilp", "minimumsetcovering_to_ilp", "minimumdominatingset_to_ilp", "maximumclique_to_ilp", + "travelingsalesman_to_ilp", ) #let examples = example-files.map(n => { diff --git a/docs/plans/2026-02-13-travelingsalesman-ilp.md b/docs/plans/2026-02-13-travelingsalesman-ilp.md new file mode 100644 index 000000000..aa30fa7d9 --- /dev/null +++ b/docs/plans/2026-02-13-travelingsalesman-ilp.md @@ -0,0 +1,696 @@ +# TravelingSalesman → ILP Reduction Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Implement a reduction from TravelingSalesman to ILP using position-based variables with McCormick linearization. + +**Architecture:** Introduce binary variables x_{v,k} (vertex v at position k) and auxiliary variables y_{v,w,k} for linearizing products. Constraints enforce a valid permutation (assignment), prohibit non-edge consecutive positions, and McCormick constraints linearize the objective. Solution extraction reads the tour permutation from x variables and maps back to edge selection. + +**Tech Stack:** Rust, `#[cfg(feature = "ilp")]` gated, `ILPSolver` for solving, `BruteForce` for cross-validation. + +--- + +## Background + +The issue says "HamiltonianCycle" but the model was renamed to `TravelingSalesman` during development. The source problem is `TravelingSalesman<SimpleGraph, i32>`. 
+ +**Source model** (`src/models/graph/traveling_salesman.rs`): +- Variables: one binary per edge (0 = not in cycle, 1 = in cycle) +- Metric: `SolutionSize<W>` (minimize total edge weight) +- Key methods: `num_vertices()`, `num_edges()`, `edges() -> Vec<(usize, usize, W)>`, `graph() -> &G` + +**Target model** (`src/models/optimization/ilp.rs`): +- `ILP::new(num_vars, bounds, constraints, objective, sense)` +- `VarBounds::binary()`, `LinearConstraint::le/ge/eq(terms, rhs)` + +**Reference reduction with similar structure:** `src/rules/coloring_ilp.rs` (vertex × color variables, assignment constraints, non-trivial extraction) + +## ILP Formulation + +Given graph G=(V,E) with n=|V|, m=|E|, edge weights w. + +**Main variables:** x_{v,k} ∈ {0,1} for v ∈ V, k ∈ {0,...,n-1}. Meaning: vertex v is at position k in the tour. Index: `v * n + k`. Total: n². + +**Auxiliary variables:** For each undirected edge (u,v) ∈ E (with u < v) and each position k ∈ {0,...,n-1}, introduce two auxiliary variables for the two directions: +- y_{u,v,k} linearizes x_{u,k} · x_{v,(k+1) mod n} (edge traversed u→v at position k) +- y_{v,u,k} linearizes x_{v,k} · x_{u,(k+1) mod n} (edge traversed v→u at position k) + +Index auxiliary as: n² + edge_idx * 2n + 2k + direction (0 for u→v, 1 for v→u). Total auxiliary: 2mn. + +**Constraints:** +1. Each vertex has exactly one position: Σ_k x_{v,k} = 1 for all v. (n constraints) +2. Each position has exactly one vertex: Σ_v x_{v,k} = 1 for all k. (n constraints) +3. Non-edge consecutive prohibition: For each ordered pair (v,w) where {v,w} ∉ E (and v ≠ w), for each k: x_{v,k} + x_{w,(k+1) mod n} ≤ 1. Count: n · (n(n-1) - 2m) constraints. +4. McCormick linearization (3 constraints per auxiliary variable): + - y ≤ x_{v,k} + - y ≤ x_{w,(k+1) mod n} + - y ≥ x_{v,k} + x_{w,(k+1) mod n} - 1 + Total: 3 · 2mn = 6mn constraints. + +**Objective:** Minimize Σ_{(u,v)∈E} w(u,v) · Σ_k (y_{u,v,k} + y_{v,u,k}) + +**Solution extraction:** +1. 
Read x_{v,k} from ILP solution: for each position k, find vertex v where x_{v,k} = 1 → get tour permutation π +2. Convert tour to edge selection: for each consecutive pair (π(k), π((k+1) mod n)), find the edge index in the source graph and set it to 1 + +**Size overhead:** +- num_vars: n² + 2mn +- num_constraints: 2n + n(n(n-1) - 2m) + 6mn + +--- + +### Task 1: Implement TravelingSalesman → ILP reduction + +**Files:** +- Create: `src/rules/travelingsalesman_ilp.rs` +- Modify: `src/rules/mod.rs` + +**Step 1: Create the reduction file** + +Create `src/rules/travelingsalesman_ilp.rs`: + +```rust +//! Reduction from TravelingSalesman to ILP (Integer Linear Programming). +//! +//! Uses position-based variables x_{v,k} with McCormick linearization. +//! - Variables: x_{v,k} for vertex v at position k (binary), plus auxiliary y variables +//! - Constraints: assignment, non-edge consecutive, McCormick +//! - Objective: minimize total edge weight of the tour + +use crate::models::graph::TravelingSalesman; +use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; +use crate::poly; +use crate::rules::registry::ReductionOverhead; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing TravelingSalesman to ILP. +#[derive(Debug, Clone)] +pub struct ReductionTSPToILP { + target: ILP, + /// Number of vertices in the source graph. + num_vertices: usize, + /// Edges of the source graph (for solution extraction). + source_edges: Vec<(usize, usize)>, +} + +impl ReductionTSPToILP { + /// Variable index for x_{v,k}: vertex v at position k. 
+ fn x_index(&self, v: usize, k: usize) -> usize { + v * self.num_vertices + k + } +} + +impl ReductionResult for ReductionTSPToILP { + type Source = TravelingSalesman<SimpleGraph, i32>; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract solution: read tour permutation from x variables, + /// then map to edge selection for the source problem. + fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> { + let n = self.num_vertices; + + // Read tour: for each position k, find vertex v with x_{v,k} = 1 + let mut tour = vec![0usize; n]; + for k in 0..n { + for v in 0..n { + if target_solution[self.x_index(v, k)] == 1 { + tour[k] = v; + break; + } + } + } + + // Map tour to edge selection + let mut edge_selection = vec![0usize; self.source_edges.len()]; + for k in 0..n { + let u = tour[k]; + let v = tour[(k + 1) % n]; + // Find the edge index for (u, v) or (v, u) + for (idx, &(a, b)) in self.source_edges.iter().enumerate() { + if (a == u && b == v) || (a == v && b == u) { + edge_selection[idx] = 1; + break; + } + } + } + + edge_selection + } +} + +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_vertices ^ 2) + poly!(num_vertices ^ 2 * num_edges)), + ("num_constraints", poly!(num_vertices) + poly!(num_vertices ^ 3) + poly!(num_vertices * num_edges)), + ]) + } +)] +impl ReduceTo<ILP> for TravelingSalesman<SimpleGraph, i32> { + type Result = ReductionTSPToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let graph = self.graph(); + let edges_with_weights = self.edges(); + let source_edges: Vec<(usize, usize)> = edges_with_weights.iter().map(|&(u, v, _)| (u, v)).collect(); + let edge_weights: Vec<f64> = edges_with_weights.iter().map(|&(_, _, w)| w as f64).collect(); + let m = source_edges.len(); + + // Variable layout: + // [0, n²): x_{v,k} = vertex v at position k + // [n², n² + 2mn): auxiliary y variables for McCormick linearization + // For edge_idx e 
and position k: + // y_{forward} at n² + e * 2n + 2k (x_{u,k} * x_{v,(k+1)%n}) + // y_{reverse} at n² + e * 2n + 2k + 1 (x_{v,k} * x_{u,(k+1)%n}) + let num_x = n * n; + let num_y = 2 * m * n; + let num_vars = num_x + num_y; + + let x_idx = |v: usize, k: usize| -> usize { v * n + k }; + let y_idx = |edge: usize, k: usize, dir: usize| -> usize { num_x + edge * 2 * n + 2 * k + dir }; + + let bounds = vec![VarBounds::binary(); num_vars]; + let mut constraints = Vec::new(); + + // Constraint 1: Each vertex has exactly one position + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|k| (x_idx(v, k), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Constraint 2: Each position has exactly one vertex + for k in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|v| (x_idx(v, k), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Constraint 3: Non-edge consecutive prohibition + // For each ordered pair (v, w) where {v, w} ∉ E and v ≠ w: + // x_{v,k} + x_{w,(k+1) mod n} <= 1 for all k + for v in 0..n { + for w in 0..n { + if v == w { + continue; + } + if graph.has_edge(v, w) { + continue; + } + for k in 0..n { + constraints.push(LinearConstraint::le( + vec![(x_idx(v, k), 1.0), (x_idx(w, (k + 1) % n), 1.0)], + 1.0, + )); + } + } + } + + // Constraint 4: McCormick linearization for auxiliary variables + // For each edge (u, v) at index e: + // Forward (dir=0): y = x_{u,k} * x_{v,(k+1)%n} + // Reverse (dir=1): y = x_{v,k} * x_{u,(k+1)%n} + for (e, &(u, v)) in source_edges.iter().enumerate() { + for k in 0..n { + let k_next = (k + 1) % n; + + // Forward: y_{e,k,0} = x_{u,k} * x_{v,k_next} + let y_fwd = y_idx(e, k, 0); + let xu = x_idx(u, k); + let xv_next = x_idx(v, k_next); + constraints.push(LinearConstraint::le(vec![(y_fwd, 1.0), (xu, -1.0)], 0.0)); + constraints.push(LinearConstraint::le(vec![(y_fwd, 1.0), (xv_next, -1.0)], 0.0)); + constraints.push(LinearConstraint::ge( + vec![(y_fwd, 1.0), (xu, 
-1.0), (xv_next, -1.0)], + -1.0, + )); + + // Reverse: y_{e,k,1} = x_{v,k} * x_{u,k_next} + let y_rev = y_idx(e, k, 1); + let xv = x_idx(v, k); + let xu_next = x_idx(u, k_next); + constraints.push(LinearConstraint::le(vec![(y_rev, 1.0), (xv, -1.0)], 0.0)); + constraints.push(LinearConstraint::le(vec![(y_rev, 1.0), (xu_next, -1.0)], 0.0)); + constraints.push(LinearConstraint::ge( + vec![(y_rev, 1.0), (xv, -1.0), (xu_next, -1.0)], + -1.0, + )); + } + } + + // Objective: minimize Σ_{e=(u,v)} w_e * Σ_k (y_{e,k,0} + y_{e,k,1}) + let mut objective: Vec<(usize, f64)> = Vec::new(); + for (e, &w) in edge_weights.iter().enumerate() { + for k in 0..n { + objective.push((y_idx(e, k, 0), w)); + objective.push((y_idx(e, k, 1), w)); + } + } + + let target = ILP::new(num_vars, bounds, constraints, objective, ObjectiveSense::Minimize); + + ReductionTSPToILP { + target, + num_vertices: n, + source_edges, + } + } +} + +#[cfg(test)] +#[path = "../unit_tests/rules/travelingsalesman_ilp.rs"] +mod tests; +``` + +**Step 2: Register in `src/rules/mod.rs`** + +Add after the existing ILP reduction registrations (after `mod minimumvertexcover_ilp;`): + +```rust +#[cfg(feature = "ilp")] +mod travelingsalesman_ilp; +``` + +And after the existing `pub use minimumvertexcover_ilp::...` line: + +```rust +#[cfg(feature = "ilp")] +pub use travelingsalesman_ilp::ReductionTSPToILP; +``` + +**Step 3: Run build to verify compilation** + +Run: `cargo build --features ilp` +Expected: Compiles successfully + +**Step 4: Commit** + +```bash +git add src/rules/travelingsalesman_ilp.rs src/rules/mod.rs +git commit -m "feat: add TravelingSalesman to ILP reduction (#52)" +``` + +--- + +### Task 2: Write unit tests for the reduction + +**Files:** +- Create: `src/unit_tests/rules/travelingsalesman_ilp.rs` + +**Step 1: Create the unit test file** + +Create `src/unit_tests/rules/travelingsalesman_ilp.rs`: + +```rust +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use 
crate::types::SolutionSize; + +#[test] +fn test_reduction_creates_valid_ilp_c4() { + // C4 cycle: 4 vertices, 4 edges. Unique Hamiltonian cycle (the cycle itself). + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 4, + vec![(0, 1), (1, 2), (2, 3), (3, 0)], + ); + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=4, m=4: num_vars = 16 + 2*4*4 = 48 + assert_eq!(ilp.num_vars, 48); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } +} + +#[test] +fn test_reduction_c4_closed_loop() { + // C4 cycle with unit weights: optimal tour cost = 4 + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 4, + vec![(0, 1), (1, 2), (2, 3), (3, 0)], + ); + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Verify extracted solution is valid on source problem + let metric = problem.evaluate(&extracted); + assert!(metric.is_valid(), "Extracted solution must be valid"); + assert_eq!(metric, SolutionSize::Valid(4)); +} + +#[test] +fn test_reduction_k4_weighted_closed_loop() { + // K4 weighted: find minimum weight Hamiltonian cycle + let problem = TravelingSalesman::<SimpleGraph, i32>::new( + 4, + vec![ + (0, 1, 10), (0, 2, 15), (0, 3, 20), + (1, 2, 35), (1, 3, 25), (2, 3, 30), + ], + ); + + // Solve via ILP reduction + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Solve via brute force for 
cross-check + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_best(&problem); + let bf_metric = problem.evaluate(&bf_solutions[0]); + let ilp_metric = problem.evaluate(&extracted); + + assert!(ilp_metric.is_valid()); + assert_eq!(ilp_metric, bf_metric, "ILP and brute force must agree on optimal cost"); +} + +#[test] +fn test_reduction_c5_unweighted_closed_loop() { + // C5 cycle with unit weights + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 5, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)], + ); + + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let metric = problem.evaluate(&extracted); + assert!(metric.is_valid()); + assert_eq!(metric, SolutionSize::Valid(5)); +} + +#[test] +fn test_no_hamiltonian_cycle_infeasible() { + // Path graph 0-1-2-3: no Hamiltonian cycle exists + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 4, + vec![(0, 1), (1, 2), (2, 3)], + ); + + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(ilp); + + assert!(result.is_none(), "Path graph should have no Hamiltonian cycle (infeasible ILP)"); +} + +#[test] +fn test_solution_extraction_structure() { + // C4 cycle: verify extraction produces correct edge selection format + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 4, + vec![(0, 1), (1, 2), (2, 3), (3, 0)], + ); + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + 
+ // Should have one value per edge + assert_eq!(extracted.len(), 4); + // All edges should be selected (C4 has unique cycle = all edges) + assert_eq!(extracted.iter().sum::<usize>(), 4); +} + +#[test] +fn test_solve_reduced() { + // Test via ILPSolver::solve_reduced + let problem = TravelingSalesman::<SimpleGraph, i32>::new( + 4, + vec![ + (0, 1, 10), (0, 2, 15), (0, 3, 20), + (1, 2, 35), (1, 3, 25), (2, 3, 30), + ], + ); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver.solve_reduced(&problem).expect("solve_reduced should work"); + + let metric = problem.evaluate(&solution); + assert!(metric.is_valid()); + + // Cross-check with brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_best(&problem); + assert_eq!(metric, problem.evaluate(&bf_solutions[0])); +} +``` + +**Step 2: Run tests** + +Run: `cargo test --features ilp travelingsalesman_ilp -- --nocapture` +Expected: All 7 tests pass + +**Step 3: Commit** + +```bash +git add src/unit_tests/rules/travelingsalesman_ilp.rs +git commit -m "test: add unit tests for TravelingSalesman to ILP reduction (#52)" +``` + +--- + +### Task 3: Write example program + +**Files:** +- Create: `examples/reduction_travelingsalesman_to_ilp.rs` +- Modify: `tests/suites/examples.rs` + +**Step 1: Create example file** + +Create `examples/reduction_travelingsalesman_to_ilp.rs`: + +```rust +// # Traveling Salesman to ILP Reduction +// +// ## Mathematical Formulation +// Variables: x_{v,k} in {0,1} for vertex v and position k; +// auxiliary y variables for McCormick linearization of products. +// Constraints: assignment, non-edge consecutive prohibition, McCormick. +// Objective: minimize total edge weight of the tour. 
+// +// ## This Example +// - Instance: K4 complete graph with weights +// - Source: TravelingSalesman with 4 vertices, 6 edges +// - Target: ILP with position-based binary variables +// +// ## Output +// Exports `docs/paper/examples/travelingsalesman_to_ilp.json` and `travelingsalesman_to_ilp.result.json`. + +use problemreductions::export::*; +use problemreductions::prelude::*; +use problemreductions::topology::SimpleGraph; + +pub fn run() { + // 1. Create TSP instance: K4 with weights + let problem = TravelingSalesman::<SimpleGraph, i32>::new( + 4, + vec![ + (0, 1, 10), (0, 2, 15), (0, 3, 20), + (1, 2, 35), (1, 3, 25), (2, 3, 30), + ], + ); + + // 2. Reduce to ILP + let reduction = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 3. Print transformation + println!("\n=== Problem Transformation ==="); + println!( + "Source: TravelingSalesman with {} variables ({} edges)", + problem.num_variables(), + problem.num_edges() + ); + println!( + "Target: ILP with {} variables, {} constraints", + ilp.num_vars, + ilp.constraints.len() + ); + + // 4. Solve target ILP + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + + // 5. Extract source solution + let tsp_solution = reduction.extract_solution(&ilp_solution); + println!("\n=== Solution ==="); + println!("Edge selection: {:?}", tsp_solution); + + // 6. Verify + let metric = problem.evaluate(&tsp_solution); + println!("Tour cost: {:?}", metric); + assert!(metric.is_valid()); + + // Cross-check with brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_best(&problem); + let bf_metric = problem.evaluate(&bf_solutions[0]); + assert_eq!(metric, bf_metric, "ILP must match brute force optimum"); + println!("Brute force confirms optimality"); + + // 7. 
Collect solutions and export JSON + let mut solutions = Vec::new(); + solutions.push(SolutionPair { + source_config: tsp_solution.clone(), + target_config: ilp_solution, + }); + + let overhead = lookup_overhead_or_empty("TravelingSalesman", "ILP"); + let edges: Vec<(usize, usize)> = problem.edges().iter().map(|&(u, v, _)| (u, v)).collect(); + + let data = ReductionData { + source: ProblemSide { + problem: TravelingSalesman::<SimpleGraph, i32>::NAME.to_string(), + variant: variant_to_map(TravelingSalesman::<SimpleGraph, i32>::variant()), + instance: serde_json::json!({ + "num_vertices": problem.num_vertices(), + "num_edges": problem.num_edges(), + "edges": edges, + }), + }, + target: ProblemSide { + problem: ILP::NAME.to_string(), + variant: variant_to_map(ILP::variant()), + instance: serde_json::json!({ + "num_vars": ilp.num_vars, + "num_constraints": ilp.constraints.len(), + }), + }, + overhead: overhead_to_json(&overhead), + }; + + let results = ResultData { solutions }; + let name = "travelingsalesman_to_ilp"; + write_example(name, &data, &results); +} + +fn main() { + run() +} +``` + +**Step 2: Register in `tests/suites/examples.rs`** + +Add after existing example registrations: + +```rust +example_test!(reduction_travelingsalesman_to_ilp); +``` + +And the corresponding test function: + +```rust +example_fn!( + test_travelingsalesman_to_ilp, + reduction_travelingsalesman_to_ilp +); +``` + +**Step 3: Run example test** + +Run: `cargo test --features ilp test_travelingsalesman_to_ilp -- --nocapture` +Expected: PASS + +**Step 4: Commit** + +```bash +git add examples/reduction_travelingsalesman_to_ilp.rs tests/suites/examples.rs +git commit -m "feat: add TravelingSalesman to ILP example (#52)" +``` + +--- + +### Task 4: Document in paper + +**Files:** +- Modify: `docs/paper/reductions.typ` + +**Step 1: Add reduction-rule entry** + +Add the following `reduction-rule` entry in `docs/paper/reductions.typ` in the appropriate location (near other ILP reductions): + 
+```typst +#reduction-rule("TravelingSalesman", "ILP", + example: true, + example-caption: [Weighted $K_4$: the optimal tour $0 arrow 1 arrow 3 arrow 2 arrow 0$ with cost 80 is found by position-based ILP.], +)[ + The traveling salesman problem reduces to binary ILP with $n^2 + 2 m n$ variables via position-based encoding with McCormick linearization. +][ + _Construction._ For graph $G = (V, E)$ with $n = |V|$ and $m = |E|$: + + _Variables:_ Binary $x_(v,k) in {0, 1}$ for each vertex $v in V$ and position $k in {0, ..., n-1}$. Interpretation: $x_(v,k) = 1$ iff vertex $v$ is at position $k$ in the tour. + + _Auxiliary variables:_ For each edge $(u,v) in E$ and position $k$, introduce $y_(u,v,k)$ and $y_(v,u,k)$ to linearize the products $x_(u,k) dot x_(v,(k+1) mod n)$ and $x_(v,k) dot x_(u,(k+1) mod n)$ respectively. + + _Constraints:_ (1) Each vertex has exactly one position: $sum_(k=0)^(n-1) x_(v,k) = 1$ for all $v in V$. (2) Each position has exactly one vertex: $sum_(v in V) x_(v,k) = 1$ for all $k$. (3) Non-edge consecutive prohibition: if ${v,w} in.not E$, then $x_(v,k) + x_(w,(k+1) mod n) <= 1$ for all $k$. (4) McCormick: $y <= x_(v,k)$, $y <= x_(w,(k+1) mod n)$, $y >= x_(v,k) + x_(w,(k+1) mod n) - 1$. + + _Objective:_ Minimize $sum_((u,v) in E) w(u,v) dot sum_k (y_(u,v,k) + y_(v,u,k))$. + + _Solution extraction._ For each position $k$, find vertex $v$ with $x_(v,k) = 1$ to recover the tour permutation; then select edges between consecutive positions. 
+] +``` + +**Step 2: Commit** + +```bash +git add docs/paper/reductions.typ +git commit -m "docs: add TravelingSalesman to ILP reduction rule in paper (#52)" +``` + +--- + +### Task 5: Regenerate reduction graph and verify + +**Step 1: Regenerate the reduction graph** + +Run: `cargo run --features ilp --example export_graph` +Expected: Updates `docs/src/reductions/reduction_graph.json` with TravelingSalesman → ILP edge + +**Step 2: Run full test suite** + +Run: `make test clippy` +Expected: All tests pass, no clippy warnings + +**Step 3: Run coverage check** + +Run: `make coverage` +Expected: Coverage >95% for new code + +**Step 4: Commit any generated files** + +```bash +git add docs/src/reductions/reduction_graph.json +git commit -m "chore: regenerate reduction graph with TravelingSalesman to ILP edge (#52)" +``` + +--- + +## Notes + +- The `poly!()` macro expressions for overhead are approximate upper bounds. Verify the exact polynomial forms compile correctly; adjust if the macro doesn't support the needed expressions. +- The `ILPSolver::solve()` returns `Option<Vec<usize>>` — `None` means infeasible, which is the expected result for graphs without Hamiltonian cycles. +- For small test instances (n ≤ 5), the ILP solver should complete quickly. Avoid K5 or larger complete graphs in tests as the ILP grows rapidly. +- The `#[reduction(...)]` proc macro must be on the `impl ReduceTo<ILP>` block. Check that `poly!` supports the `^` operator or use multiplication: `poly!(num_vertices * num_vertices)`. diff --git a/docs/src/getting-started.md b/docs/src/getting-started.md index db15eed9d..f37e29e20 100644 --- a/docs/src/getting-started.md +++ b/docs/src/getting-started.md @@ -2,7 +2,7 @@ ## What This Library Does -**problemreductions** transforms hard computational problems into forms that efficient solvers can handle. You define a problem, reduce it to another problem type (like QUBO or ILP), solve the reduced problem, and extract the solution back. 
The [interactive reduction graph](./introduction.html) shows all available problem types and transformations. +**problem-reductions** transforms hard computational problems into forms that efficient solvers can handle. You define a problem, reduce it to another problem type (like QUBO or ILP), solve the reduced problem, and extract the solution back. The [interactive reduction graph](./introduction.html) shows all available problem types and transformations. ## Installation diff --git a/docs/src/introduction.md b/docs/src/introduction.md index b27ee383e..289a1b5a9 100644 --- a/docs/src/introduction.md +++ b/docs/src/introduction.md @@ -1,12 +1,6 @@ # Problem Reductions -A Rust library for reducing NP-hard problems. - -## Overview - -**problemreductions** provides implementations of various computational hard problems and reduction rules between them. It is designed for algorithm research, education, and industry applications. - -For theoretical background and correctness proofs, see the [PDF manual](https://codingthrust.github.io/problem-reductions/reductions.pdf). +**problem-reductions** is a Rust library that provides implementations of various computationally hard problems and reduction rules between them. It is designed for algorithm research, education, and industry applications. ## Reduction Graph @@ -353,6 +347,8 @@ For theoretical background and correctness proofs, see the [PDF manual](https://codingthrust.github.io/problem-reductions/reductions.pdf). })(); </script> +For theoretical background and correctness proofs, see the [PDF manual](https://codingthrust.github.io/problem-reductions/reductions.pdf). + ## Our Vision Computational complexity theory has produced a rich body of polynomial-time reductions between NP-hard problems, yet these results largely remain confined to papers. 
The gap between theoretical algorithms and working software leads to two persistent inefficiencies: diff --git a/docs/src/reductions/reduction_graph.json b/docs/src/reductions/reduction_graph.json index d3122dce9..98821b4bc 100644 --- a/docs/src/reductions/reduction_graph.json +++ b/docs/src/reductions/reduction_graph.json @@ -68,6 +68,15 @@ "category": "graph", "doc_path": "models/graph/struct.KColoring.html" }, + { + "name": "KColoring", + "variant": { + "graph": "SimpleGraph", + "weight": "Unweighted" + }, + "category": "graph", + "doc_path": "models/graph/struct.KColoring.html" + }, { "name": "KSatisfiability", "variant": {}, @@ -92,6 +101,24 @@ "category": "satisfiability", "doc_path": "models/satisfiability/struct.KSatisfiability.html" }, + { + "name": "KSatisfiability", + "variant": { + "k": "4", + "weight": "Unweighted" + }, + "category": "satisfiability", + "doc_path": "models/satisfiability/struct.KSatisfiability.html" + }, + { + "name": "KSatisfiability", + "variant": { + "k": "5", + "weight": "Unweighted" + }, + "category": "satisfiability", + "doc_path": "models/satisfiability/struct.KSatisfiability.html" + }, { "name": "KSatisfiability", "variant": { @@ -125,6 +152,21 @@ "category": "graph", "doc_path": "models/graph/struct.MaxCut.html" }, + { + "name": "MaximumClique", + "variant": {}, + "category": "graph", + "doc_path": "models/graph/struct.MaximumClique.html" + }, + { + "name": "MaximumClique", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximumClique.html" + }, { "name": "MaximumIndependentSet", "variant": {}, @@ -182,6 +224,15 @@ "category": "graph", "doc_path": "models/graph/struct.MaximumMatching.html" }, + { + "name": "MaximumMatching", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + }, + "category": "graph", + "doc_path": "models/graph/struct.MaximumMatching.html" + }, { "name": "MaximumSetPacking", "variant": {}, @@ -236,6 +287,15 @@ "category": 
"set", "doc_path": "models/set/struct.MinimumSetCovering.html" }, + { + "name": "MinimumSetCovering", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + }, + "category": "set", + "doc_path": "models/set/struct.MinimumSetCovering.html" + }, { "name": "MinimumVertexCover", "variant": {}, @@ -331,6 +391,21 @@ }, "category": "optimization", "doc_path": "models/optimization/struct.SpinGlass.html" + }, + { + "name": "TravelingSalesman", + "variant": {}, + "category": "graph", + "doc_path": "models/graph/struct.TravelingSalesman.html" + }, + { + "name": "TravelingSalesman", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + }, + "category": "graph", + "doc_path": "models/graph/struct.TravelingSalesman.html" } ], "edges": [ @@ -468,7 +543,7 @@ } }, "target": { - "name": "ILP", + "name": "QUBO", "variant": { "graph": "SimpleGraph", "weight": "f64" @@ -478,24 +553,20 @@ { "field": "num_vars", "formula": "num_vertices * num_colors" - }, - { - "field": "num_constraints", - "formula": "num_vertices + num_edges * num_colors" } ], - "doc_path": "rules/coloring_ilp/index.html" + "doc_path": "rules/coloring_qubo/index.html" }, { "source": { "name": "KColoring", "variant": { "graph": "SimpleGraph", - "k": "N" + "weight": "Unweighted" } }, "target": { - "name": "QUBO", + "name": "ILP", "variant": { "graph": "SimpleGraph", "weight": "f64" @@ -505,9 +576,13 @@ { "field": "num_vars", "formula": "num_vertices * num_colors" + }, + { + "field": "num_constraints", + "formula": "num_vertices + num_edges * num_colors" } ], - "doc_path": "rules/coloring_qubo/index.html" + "doc_path": "rules/coloring_ilp/index.html" }, { "source": { @@ -609,6 +684,60 @@ ], "doc_path": "rules/ksatisfiability_qubo/index.html" }, + { + "source": { + "name": "KSatisfiability", + "variant": { + "k": "4", + "weight": "Unweighted" + } + }, + "target": { + "name": "KSatisfiability", + "variant": { + "k": "N", + "weight": "Unweighted" + } + }, + "overhead": [ + { + "field": "num_clauses", + 
"formula": "num_clauses" + }, + { + "field": "num_vars", + "formula": "num_vars" + } + ], + "doc_path": "" + }, + { + "source": { + "name": "KSatisfiability", + "variant": { + "k": "5", + "weight": "Unweighted" + } + }, + "target": { + "name": "KSatisfiability", + "variant": { + "k": "N", + "weight": "Unweighted" + } + }, + "overhead": [ + { + "field": "num_clauses", + "formula": "num_clauses" + }, + { + "field": "num_vars", + "formula": "num_vars" + } + ], + "doc_path": "" + }, { "source": { "name": "KSatisfiability", @@ -690,6 +819,33 @@ ], "doc_path": "rules/spinglass_maxcut/index.html" }, + { + "source": { + "name": "MaximumClique", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "target": { + "name": "ILP", + "variant": { + "graph": "SimpleGraph", + "weight": "f64" + } + }, + "overhead": [ + { + "field": "num_vars", + "formula": "num_vertices" + }, + { + "field": "num_constraints", + "formula": "num_vertices^2" + } + ], + "doc_path": "rules/maximumclique_ilp/index.html" + }, { "source": { "name": "MaximumIndependentSet", @@ -852,6 +1008,33 @@ ], "doc_path": "rules/minimumvertexcover_maximumindependentset/index.html" }, + { + "source": { + "name": "MaximumIndependentSet", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "target": { + "name": "ILP", + "variant": { + "graph": "SimpleGraph", + "weight": "f64" + } + }, + "overhead": [ + { + "field": "num_vars", + "formula": "num_vertices" + }, + { + "field": "num_constraints", + "formula": "num_edges" + } + ], + "doc_path": "rules/maximumindependentset_ilp/index.html" + }, { "source": { "name": "MaximumIndependentSet", @@ -929,6 +1112,24 @@ ], "doc_path": "" }, + { + "source": { + "name": "MaximumMatching", + "variant": { + "graph": "SimpleGraph", + "weight": "Unweighted" + } + }, + "target": { + "name": "MaximumMatching", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "overhead": [], + "doc_path": "" + }, { "source": { "name": "MaximumMatching", @@ 
-956,6 +1157,33 @@ ], "doc_path": "rules/maximummatching_maximumsetpacking/index.html" }, + { + "source": { + "name": "MaximumMatching", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "target": { + "name": "ILP", + "variant": { + "graph": "SimpleGraph", + "weight": "f64" + } + }, + "overhead": [ + { + "field": "num_vars", + "formula": "num_edges" + }, + { + "field": "num_constraints", + "formula": "num_vertices" + } + ], + "doc_path": "rules/maximummatching_ilp/index.html" + }, { "source": { "name": "MaximumSetPacking", @@ -1010,6 +1238,33 @@ ], "doc_path": "" }, + { + "source": { + "name": "MaximumSetPacking", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "target": { + "name": "ILP", + "variant": { + "graph": "SimpleGraph", + "weight": "f64" + } + }, + "overhead": [ + { + "field": "num_vars", + "formula": "num_sets" + }, + { + "field": "num_constraints", + "formula": "num_sets^2" + } + ], + "doc_path": "rules/maximumsetpacking_ilp/index.html" + }, { "source": { "name": "MaximumSetPacking", @@ -1033,6 +1288,87 @@ ], "doc_path": "rules/maximumsetpacking_qubo/index.html" }, + { + "source": { + "name": "MinimumDominatingSet", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "target": { + "name": "ILP", + "variant": { + "graph": "SimpleGraph", + "weight": "f64" + } + }, + "overhead": [ + { + "field": "num_vars", + "formula": "num_vertices" + }, + { + "field": "num_constraints", + "formula": "num_vertices" + } + ], + "doc_path": "rules/minimumdominatingset_ilp/index.html" + }, + { + "source": { + "name": "MinimumSetCovering", + "variant": { + "graph": "SimpleGraph", + "weight": "Unweighted" + } + }, + "target": { + "name": "MinimumSetCovering", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "overhead": [ + { + "field": "num_sets", + "formula": "num_sets" + }, + { + "field": "num_elements", + "formula": "num_elements" + } + ], + "doc_path": "" + }, + { + "source": { + "name": 
"MinimumSetCovering", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "target": { + "name": "ILP", + "variant": { + "graph": "SimpleGraph", + "weight": "f64" + } + }, + "overhead": [ + { + "field": "num_vars", + "formula": "num_sets" + }, + { + "field": "num_constraints", + "formula": "universe_size" + } + ], + "doc_path": "rules/minimumsetcovering_ilp/index.html" + }, { "source": { "name": "MinimumVertexCover", @@ -1114,6 +1450,33 @@ ], "doc_path": "" }, + { + "source": { + "name": "MinimumVertexCover", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "target": { + "name": "ILP", + "variant": { + "graph": "SimpleGraph", + "weight": "f64" + } + }, + "overhead": [ + { + "field": "num_vars", + "formula": "num_vertices" + }, + { + "field": "num_constraints", + "formula": "num_edges" + } + ], + "doc_path": "rules/minimumvertexcover_ilp/index.html" + }, { "source": { "name": "MinimumVertexCover", @@ -1198,7 +1561,61 @@ "target": { "name": "KSatisfiability", "variant": { - "k": "N", + "k": "3", + "weight": "Unweighted" + } + }, + "overhead": [ + { + "field": "num_clauses", + "formula": "num_clauses + num_literals" + }, + { + "field": "num_vars", + "formula": "num_vars + num_literals" + } + ], + "doc_path": "rules/sat_ksat/index.html" + }, + { + "source": { + "name": "Satisfiability", + "variant": { + "graph": "SimpleGraph", + "weight": "Unweighted" + } + }, + "target": { + "name": "KSatisfiability", + "variant": { + "k": "4", + "weight": "Unweighted" + } + }, + "overhead": [ + { + "field": "num_clauses", + "formula": "num_clauses + num_literals" + }, + { + "field": "num_vars", + "formula": "num_vars + num_literals" + } + ], + "doc_path": "rules/sat_ksat/index.html" + }, + { + "source": { + "name": "Satisfiability", + "variant": { + "graph": "SimpleGraph", + "weight": "Unweighted" + } + }, + "target": { + "name": "KSatisfiability", + "variant": { + "k": "5", "weight": "Unweighted" } }, @@ -1425,6 +1842,33 @@ } ], "doc_path": "" + 
}, + { + "source": { + "name": "TravelingSalesman", + "variant": { + "graph": "SimpleGraph", + "weight": "i32" + } + }, + "target": { + "name": "ILP", + "variant": { + "graph": "SimpleGraph", + "weight": "f64" + } + }, + "overhead": [ + { + "field": "num_vars", + "formula": "num_vertices^2 + 2 * num_vertices * num_edges" + }, + { + "field": "num_constraints", + "formula": "num_vertices^3 - num_vertices^2 + 2 * num_vertices + 4 * num_vertices * num_edges" + } + ], + "doc_path": "rules/travelingsalesman_ilp/index.html" } ] } \ No newline at end of file diff --git a/examples/reduction_travelingsalesman_to_ilp.rs b/examples/reduction_travelingsalesman_to_ilp.rs new file mode 100644 index 000000000..bbba74d38 --- /dev/null +++ b/examples/reduction_travelingsalesman_to_ilp.rs @@ -0,0 +1,111 @@ +// # Traveling Salesman to ILP Reduction +// +// ## Mathematical Formulation +// Variables: x_{v,k} in {0,1} for vertex v and position k; +// auxiliary y variables for McCormick linearization of products. +// Constraints: assignment, non-edge consecutive prohibition, McCormick. +// Objective: minimize total edge weight of the tour. +// +// ## This Example +// - Instance: K4 complete graph with weights +// - Source: TravelingSalesman with 4 vertices, 6 edges +// - Target: ILP with position-based binary variables +// +// ## Output +// Exports `docs/paper/examples/travelingsalesman_to_ilp.json` and `travelingsalesman_to_ilp.result.json`. + +use problemreductions::export::*; +use problemreductions::prelude::*; +use problemreductions::solvers::ILPSolver; +use problemreductions::topology::SimpleGraph; + +pub fn run() { + // 1. Create TSP instance: K4 with weights + let problem = TravelingSalesman::<SimpleGraph, i32>::new( + 4, + vec![ + (0, 1, 10), + (0, 2, 15), + (0, 3, 20), + (1, 2, 35), + (1, 3, 25), + (2, 3, 30), + ], + ); + + // 2. Reduce to ILP + let reduction = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 3. 
Print transformation + println!("\n=== Problem Transformation ==="); + println!( + "Source: TravelingSalesman with {} variables ({} edges)", + problem.num_variables(), + problem.num_edges() + ); + println!( + "Target: ILP with {} variables, {} constraints", + ilp.num_vars, + ilp.constraints.len() + ); + + // 4. Solve target ILP + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + + // 5. Extract source solution + let tsp_solution = reduction.extract_solution(&ilp_solution); + println!("\n=== Solution ==="); + println!("Edge selection: {:?}", tsp_solution); + + // 6. Verify + let metric = problem.evaluate(&tsp_solution); + println!("Tour cost: {:?}", metric); + assert!(metric.is_valid()); + + // Cross-check with brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_best(&problem); + let bf_metric = problem.evaluate(&bf_solutions[0]); + assert_eq!(metric, bf_metric, "ILP must match brute force optimum"); + println!("Brute force confirms optimality"); + + // 7. 
Collect solutions and export JSON + let solutions = vec![SolutionPair { + source_config: tsp_solution.clone(), + target_config: ilp_solution, + }]; + + let overhead = lookup_overhead_or_empty("TravelingSalesman", "ILP"); + let edges: Vec<(usize, usize)> = problem.edges().iter().map(|&(u, v, _)| (u, v)).collect(); + + let data = ReductionData { + source: ProblemSide { + problem: TravelingSalesman::<SimpleGraph, i32>::NAME.to_string(), + variant: variant_to_map(TravelingSalesman::<SimpleGraph, i32>::variant()), + instance: serde_json::json!({ + "num_vertices": problem.num_vertices(), + "num_edges": problem.num_edges(), + "edges": edges, + }), + }, + target: ProblemSide { + problem: ILP::NAME.to_string(), + variant: variant_to_map(ILP::variant()), + instance: serde_json::json!({ + "num_vars": ilp.num_vars, + "num_constraints": ilp.constraints.len(), + }), + }, + overhead: overhead_to_json(&overhead), + }; + + let results = ResultData { solutions }; + let name = "travelingsalesman_to_ilp"; + write_example(name, &data, &results); +} + +fn main() { + run() +} diff --git a/src/rules/coloring_ilp.rs b/src/rules/coloring_ilp.rs index dcb07bcf6..aba47033f 100644 --- a/src/rules/coloring_ilp.rs +++ b/src/rules/coloring_ilp.rs @@ -10,27 +10,11 @@ use crate::models::graph::KColoring; use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; use crate::poly; -use crate::rules::registry::{ReductionEntry, ReductionOverhead}; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::topology::{Graph, SimpleGraph}; -// Register reduction in the inventory for automatic discovery. -// Uses usize::MAX sentinel for K (maps to "N" via const_usize_str). -// G is generic → defaults to SimpleGraph. -inventory::submit! 
{ - ReductionEntry { - source_name: "KColoring", - target_name: "ILP", - source_variant_fn: || <KColoring<{usize::MAX}, SimpleGraph> as crate::traits::Problem>::variant(), - target_variant_fn: || <ILP as crate::traits::Problem>::variant(), - overhead_fn: || ReductionOverhead::new(vec![ - ("num_vars", poly!(num_vertices * num_colors)), - ("num_constraints", poly!(num_vertices) + poly!(num_edges * num_colors)), - ]), - module_path: module_path!(), - } -} - /// Result of reducing KColoring to ILP. /// /// This reduction creates a binary ILP where: @@ -80,6 +64,14 @@ where } } +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_vertices * num_colors)), + ("num_constraints", poly!(num_vertices) + poly!(num_edges * num_colors)), + ]) + } +)] impl<const K: usize, G> ReduceTo<ILP> for KColoring<K, G> where G: Graph, diff --git a/src/rules/factoring_ilp.rs b/src/rules/factoring_ilp.rs index eaf0cfc71..491c06278 100644 --- a/src/rules/factoring_ilp.rs +++ b/src/rules/factoring_ilp.rs @@ -20,48 +20,11 @@ use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; use crate::models::specialized::Factoring; use crate::polynomial::{Monomial, Polynomial}; -use crate::rules::registry::{ReductionEntry, ReductionOverhead}; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; use std::cmp::min; -// Register reduction in the inventory for automatic discovery -inventory::submit! 
{ - ReductionEntry { - source_name: "Factoring", - target_name: "ILP", - source_variant_fn: || <Factoring as crate::traits::Problem>::variant(), - target_variant_fn: || <ILP as crate::traits::Problem>::variant(), - overhead_fn: || ReductionOverhead::new(vec![ - // num_vars = m + n + m*n + num_carries where num_carries = max(m+n, target_bits) - // For feasible instances, target_bits <= m+n, so this is 2(m+n) + m*n - ("num_vars", Polynomial { - terms: vec![ - Monomial::var("num_bits_first").scale(2.0), - Monomial::var("num_bits_second").scale(2.0), - Monomial { - coefficient: 1.0, - variables: vec![("num_bits_first", 1), ("num_bits_second", 1)], - }, - ] - }), - // num_constraints = 3*m*n + num_bit_positions + 1 - // For feasible instances (target_bits <= m+n), this is 3*m*n + (m+n) + 1 - ("num_constraints", Polynomial { - terms: vec![ - Monomial { - coefficient: 3.0, - variables: vec![("num_bits_first", 1), ("num_bits_second", 1)], - }, - Monomial::var("num_bits_first"), - Monomial::var("num_bits_second"), - Monomial::constant(1.0), - ] - }), - ]), - module_path: module_path!(), - } -} - /// Result of reducing Factoring to ILP. 
/// /// This reduction creates an ILP where: @@ -130,6 +93,35 @@ impl ReductionResult for ReductionFactoringToILP { } } +#[reduction(overhead = { + ReductionOverhead::new(vec![ + // num_vars = m + n + m*n + num_carries where num_carries = max(m+n, target_bits) + // For feasible instances, target_bits <= m+n, so this is 2(m+n) + m*n + ("num_vars", Polynomial { + terms: vec![ + Monomial::var("num_bits_first").scale(2.0), + Monomial::var("num_bits_second").scale(2.0), + Monomial { + coefficient: 1.0, + variables: vec![("num_bits_first", 1), ("num_bits_second", 1)], + }, + ] + }), + // num_constraints = 3*m*n + num_bit_positions + 1 + // For feasible instances (target_bits <= m+n), this is 3*m*n + (m+n) + 1 + ("num_constraints", Polynomial { + terms: vec![ + Monomial { + coefficient: 3.0, + variables: vec![("num_bits_first", 1), ("num_bits_second", 1)], + }, + Monomial::var("num_bits_first"), + Monomial::var("num_bits_second"), + Monomial::constant(1.0), + ] + }), + ]) +})] impl ReduceTo<ILP> for Factoring { type Result = ReductionFactoringToILP; diff --git a/src/rules/graph.rs b/src/rules/graph.rs index a9e98c1fb..fe630ee61 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -925,7 +925,8 @@ impl ReductionGraph { | "KColoring" | "MaximumMatching" | "MaxCut" - | "MaximumClique" => "graph", + | "MaximumClique" + | "TravelingSalesman" => "graph", "Satisfiability" | "KSatisfiability" => "satisfiability", "SpinGlass" | "QUBO" | "ILP" => "optimization", "MinimumSetCovering" | "MaximumSetPacking" => "set", @@ -943,6 +944,7 @@ impl ReductionGraph { || name.contains("MinimumDominatingSet") || name.contains("MaximumMatching") || name.contains("MaximumClique") + || name.contains("TravelingSalesman") { "graph" } else if name.contains("MaximumSetPacking") || name.contains("SetCover") { diff --git a/src/rules/maximumclique_ilp.rs b/src/rules/maximumclique_ilp.rs index 78d9b245f..ec65aa70c 100644 --- a/src/rules/maximumclique_ilp.rs +++ b/src/rules/maximumclique_ilp.rs @@ 
-8,6 +8,9 @@ use crate::models::graph::MaximumClique; use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; +use crate::poly; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::topology::SimpleGraph; @@ -39,6 +42,14 @@ impl ReductionResult for ReductionCliqueToILP { } } +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_vertices)), + ("num_constraints", poly!(num_vertices ^ 2)), + ]) + } +)] impl ReduceTo<ILP> for MaximumClique<SimpleGraph, i32> { type Result = ReductionCliqueToILP; diff --git a/src/rules/maximumindependentset_ilp.rs b/src/rules/maximumindependentset_ilp.rs index 810794ed8..df53f0245 100644 --- a/src/rules/maximumindependentset_ilp.rs +++ b/src/rules/maximumindependentset_ilp.rs @@ -7,6 +7,9 @@ use crate::models::graph::MaximumIndependentSet; use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; +use crate::poly; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::topology::SimpleGraph; @@ -38,6 +41,14 @@ impl ReductionResult for ReductionISToILP { } } +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_vertices)), + ("num_constraints", poly!(num_edges)), + ]) + } +)] impl ReduceTo<ILP> for MaximumIndependentSet<SimpleGraph, i32> { type Result = ReductionISToILP; diff --git a/src/rules/maximummatching_ilp.rs b/src/rules/maximummatching_ilp.rs index 0b55a50f9..e228275ad 100644 --- a/src/rules/maximummatching_ilp.rs +++ b/src/rules/maximummatching_ilp.rs @@ -8,6 +8,9 @@ use crate::models::graph::MaximumMatching; use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; +use crate::poly; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; use 
crate::topology::SimpleGraph; @@ -39,6 +42,14 @@ impl ReductionResult for ReductionMatchingToILP { } } +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_edges)), + ("num_constraints", poly!(num_vertices)), + ]) + } +)] impl ReduceTo<ILP> for MaximumMatching<SimpleGraph, i32> { type Result = ReductionMatchingToILP; diff --git a/src/rules/maximumsetpacking_ilp.rs b/src/rules/maximumsetpacking_ilp.rs index 6ef752a88..b5f22d74a 100644 --- a/src/rules/maximumsetpacking_ilp.rs +++ b/src/rules/maximumsetpacking_ilp.rs @@ -7,6 +7,9 @@ use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; use crate::models::set::MaximumSetPacking; +use crate::poly; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; /// Result of reducing MaximumSetPacking to ILP. @@ -37,6 +40,14 @@ impl ReductionResult for ReductionSPToILP { } } +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_sets)), + ("num_constraints", poly!(num_sets ^ 2)), + ]) + } +)] impl ReduceTo<ILP> for MaximumSetPacking<i32> { type Result = ReductionSPToILP; diff --git a/src/rules/minimumdominatingset_ilp.rs b/src/rules/minimumdominatingset_ilp.rs index 1f69d87b1..51dbfe80a 100644 --- a/src/rules/minimumdominatingset_ilp.rs +++ b/src/rules/minimumdominatingset_ilp.rs @@ -8,6 +8,9 @@ use crate::models::graph::MinimumDominatingSet; use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; +use crate::poly; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::topology::SimpleGraph; @@ -40,6 +43,14 @@ impl ReductionResult for ReductionDSToILP { } } +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_vertices)), + ("num_constraints", poly!(num_vertices)), + ]) + } +)] impl ReduceTo<ILP> for 
MinimumDominatingSet<SimpleGraph, i32> { type Result = ReductionDSToILP; diff --git a/src/rules/minimumsetcovering_ilp.rs b/src/rules/minimumsetcovering_ilp.rs index 6f85afd7c..1a3889b72 100644 --- a/src/rules/minimumsetcovering_ilp.rs +++ b/src/rules/minimumsetcovering_ilp.rs @@ -7,6 +7,9 @@ use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; use crate::models::set::MinimumSetCovering; +use crate::poly; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; /// Result of reducing MinimumSetCovering to ILP. @@ -37,6 +40,14 @@ impl ReductionResult for ReductionSCToILP { } } +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_sets)), + ("num_constraints", poly!(universe_size)), + ]) + } +)] impl ReduceTo<ILP> for MinimumSetCovering<i32> { type Result = ReductionSCToILP; diff --git a/src/rules/minimumvertexcover_ilp.rs b/src/rules/minimumvertexcover_ilp.rs index 4ee3cd6a2..89c712077 100644 --- a/src/rules/minimumvertexcover_ilp.rs +++ b/src/rules/minimumvertexcover_ilp.rs @@ -7,6 +7,9 @@ use crate::models::graph::MinimumVertexCover; use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; +use crate::poly; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; use crate::rules::traits::{ReduceTo, ReductionResult}; use crate::topology::SimpleGraph; @@ -38,6 +41,14 @@ impl ReductionResult for ReductionVCToILP { } } +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + ("num_vars", poly!(num_vertices)), + ("num_constraints", poly!(num_edges)), + ]) + } +)] impl ReduceTo<ILP> for MinimumVertexCover<SimpleGraph, i32> { type Result = ReductionVCToILP; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 040fda518..64e2f6c52 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -50,6 +50,8 @@ mod minimumdominatingset_ilp; mod minimumsetcovering_ilp; #[cfg(feature = "ilp")] mod 
minimumvertexcover_ilp; +#[cfg(feature = "ilp")] +mod travelingsalesman_ilp; pub use circuit_spinglass::{ and_gadget, not_gadget, or_gadget, set0_gadget, set1_gadget, xor_gadget, LogicGadget, @@ -96,3 +98,5 @@ pub use minimumdominatingset_ilp::ReductionDSToILP; pub use minimumsetcovering_ilp::ReductionSCToILP; #[cfg(feature = "ilp")] pub use minimumvertexcover_ilp::ReductionVCToILP; +#[cfg(feature = "ilp")] +pub use travelingsalesman_ilp::ReductionTSPToILP; diff --git a/src/rules/sat_ksat.rs b/src/rules/sat_ksat.rs index f654b6e83..d79476506 100644 --- a/src/rules/sat_ksat.rs +++ b/src/rules/sat_ksat.rs @@ -112,6 +112,12 @@ fn add_clause_to_ksat( /// the `ReduceTo` trait pattern used in this crate. macro_rules! impl_sat_to_ksat { ($k:expr) => { + #[reduction(overhead = { + ReductionOverhead::new(vec![ + ("num_clauses", poly!(num_clauses) + poly!(num_literals)), + ("num_vars", poly!(num_vars) + poly!(num_literals)), + ]) + })] impl ReduceTo<KSatisfiability<$k>> for Satisfiability { type Result = ReductionSATToKSAT<$k>; @@ -186,19 +192,3 @@ impl<const K: usize> ReduceTo<Satisfiability> for KSatisfiability<K> { #[cfg(test)] #[path = "../unit_tests/rules/sat_ksat.rs"] mod tests; - -// Register SAT -> KSAT reduction manually (generated by macro, can't use #[reduction]). -// Uses usize::MAX sentinel for K (maps to "N" via const_usize_str). -inventory::submit! 
{ - crate::rules::registry::ReductionEntry { - source_name: "Satisfiability", - target_name: "KSatisfiability", - source_variant_fn: || <Satisfiability as crate::traits::Problem>::variant(), - target_variant_fn: || <KSatisfiability<{usize::MAX}> as crate::traits::Problem>::variant(), - overhead_fn: || ReductionOverhead::new(vec![ - ("num_clauses", poly!(num_clauses) + poly!(num_literals)), - ("num_vars", poly!(num_vars) + poly!(num_literals)), - ]), - module_path: module_path!(), - } -} diff --git a/src/rules/travelingsalesman_ilp.rs b/src/rules/travelingsalesman_ilp.rs new file mode 100644 index 000000000..cf31e7784 --- /dev/null +++ b/src/rules/travelingsalesman_ilp.rs @@ -0,0 +1,217 @@ +//! Reduction from TravelingSalesman to ILP (Integer Linear Programming). +//! +//! Uses position-based variables x_{v,k} with McCormick linearization. +//! - Variables: x_{v,k} for vertex v at position k (binary), plus auxiliary y variables +//! - Constraints: assignment, non-edge consecutive, McCormick +//! - Objective: minimize total edge weight of the tour + +use crate::models::graph::TravelingSalesman; +use crate::models::optimization::{LinearConstraint, ObjectiveSense, VarBounds, ILP}; +use crate::polynomial::{Monomial, Polynomial}; +use crate::reduction; +use crate::rules::registry::ReductionOverhead; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing TravelingSalesman to ILP. +#[derive(Debug, Clone)] +pub struct ReductionTSPToILP { + target: ILP, + /// Number of vertices in the source graph. + num_vertices: usize, + /// Edges of the source graph (for solution extraction). + source_edges: Vec<(usize, usize)>, +} + +impl ReductionTSPToILP { + /// Variable index for x_{v,k}: vertex v at position k. 
+ fn x_index(&self, v: usize, k: usize) -> usize { + v * self.num_vertices + k + } +} + +impl ReductionResult for ReductionTSPToILP { + type Source = TravelingSalesman<SimpleGraph, i32>; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract solution: read tour permutation from x variables, + /// then map to edge selection for the source problem. + fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> { + let n = self.num_vertices; + + // Read tour: for each position k, find vertex v with x_{v,k} = 1 + let mut tour = vec![0usize; n]; + for k in 0..n { + for v in 0..n { + if target_solution[self.x_index(v, k)] == 1 { + tour[k] = v; + break; + } + } + } + + // Map tour to edge selection + let mut edge_selection = vec![0usize; self.source_edges.len()]; + for k in 0..n { + let u = tour[k]; + let v = tour[(k + 1) % n]; + // Find the edge index for (u, v) or (v, u) + for (idx, &(a, b)) in self.source_edges.iter().enumerate() { + if (a == u && b == v) || (a == v && b == u) { + edge_selection[idx] = 1; + break; + } + } + } + + edge_selection + } +} + +#[reduction( + overhead = { + ReductionOverhead::new(vec![ + // num_vars = n^2 + 2*m*n + ("num_vars", Polynomial::var_pow("num_vertices", 2) + Polynomial { + terms: vec![Monomial { + coefficient: 2.0, + variables: vec![("num_vertices", 1), ("num_edges", 1)], + }] + }), + // num_constraints = 2n + n(n(n-1) - 2m) + 6mn = n^3 - n^2 + 2n + 4mn + ("num_constraints", Polynomial::var_pow("num_vertices", 3) + Polynomial { + terms: vec![ + Monomial { + coefficient: -1.0, + variables: vec![("num_vertices", 2)], + }, + Monomial { + coefficient: 2.0, + variables: vec![("num_vertices", 1)], + }, + Monomial { + coefficient: 4.0, + variables: vec![("num_vertices", 1), ("num_edges", 1)], + }, + ] + }), + ]) + } +)] +impl ReduceTo<ILP> for TravelingSalesman<SimpleGraph, i32> { + type Result = ReductionTSPToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let 
graph = self.graph(); + let edges_with_weights = self.edges(); + let source_edges: Vec<(usize, usize)> = edges_with_weights.iter().map(|&(u, v, _)| (u, v)).collect(); + let edge_weights: Vec<f64> = edges_with_weights.iter().map(|&(_, _, w)| w as f64).collect(); + let m = source_edges.len(); + + // Variable layout: + // [0, n²): x_{v,k} = vertex v at position k + // [n², n² + 2mn): auxiliary y variables for McCormick linearization + // For edge_idx e and position k: + // y_{forward} at n² + e * 2n + 2k (x_{u,k} * x_{v,(k+1)%n}) + // y_{reverse} at n² + e * 2n + 2k + 1 (x_{v,k} * x_{u,(k+1)%n}) + let num_x = n * n; + let num_y = 2 * m * n; + let num_vars = num_x + num_y; + + let x_idx = |v: usize, k: usize| -> usize { v * n + k }; + let y_idx = |edge: usize, k: usize, dir: usize| -> usize { num_x + edge * 2 * n + 2 * k + dir }; + + let bounds = vec![VarBounds::binary(); num_vars]; + let mut constraints = Vec::new(); + + // Constraint 1: Each vertex has exactly one position + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|k| (x_idx(v, k), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Constraint 2: Each position has exactly one vertex + for k in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|v| (x_idx(v, k), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Constraint 3: Non-edge consecutive prohibition + // For each ordered pair (v, w) where {v, w} ∉ E and v ≠ w: + // x_{v,k} + x_{w,(k+1) mod n} <= 1 for all k + for v in 0..n { + for w in 0..n { + if v == w { + continue; + } + if graph.has_edge(v, w) { + continue; + } + for k in 0..n { + constraints.push(LinearConstraint::le( + vec![(x_idx(v, k), 1.0), (x_idx(w, (k + 1) % n), 1.0)], + 1.0, + )); + } + } + } + + // Constraint 4: McCormick linearization for auxiliary variables + // For each edge (u, v) at index e: + // Forward (dir=0): y = x_{u,k} * x_{v,(k+1)%n} + // Reverse (dir=1): y = x_{v,k} * x_{u,(k+1)%n} + for (e, &(u, v)) in 
source_edges.iter().enumerate() { + for k in 0..n { + let k_next = (k + 1) % n; + + // Forward: y_{e,k,0} = x_{u,k} * x_{v,k_next} + let y_fwd = y_idx(e, k, 0); + let xu = x_idx(u, k); + let xv_next = x_idx(v, k_next); + constraints.push(LinearConstraint::le(vec![(y_fwd, 1.0), (xu, -1.0)], 0.0)); + constraints.push(LinearConstraint::le(vec![(y_fwd, 1.0), (xv_next, -1.0)], 0.0)); + constraints.push(LinearConstraint::ge( + vec![(y_fwd, 1.0), (xu, -1.0), (xv_next, -1.0)], + -1.0, + )); + + // Reverse: y_{e,k,1} = x_{v,k} * x_{u,k_next} + let y_rev = y_idx(e, k, 1); + let xv = x_idx(v, k); + let xu_next = x_idx(u, k_next); + constraints.push(LinearConstraint::le(vec![(y_rev, 1.0), (xv, -1.0)], 0.0)); + constraints.push(LinearConstraint::le(vec![(y_rev, 1.0), (xu_next, -1.0)], 0.0)); + constraints.push(LinearConstraint::ge( + vec![(y_rev, 1.0), (xv, -1.0), (xu_next, -1.0)], + -1.0, + )); + } + } + + // Objective: minimize Σ_{e=(u,v)} w_e * Σ_k (y_{e,k,0} + y_{e,k,1}) + let mut objective: Vec<(usize, f64)> = Vec::new(); + for (e, &w) in edge_weights.iter().enumerate() { + for k in 0..n { + objective.push((y_idx(e, k, 0), w)); + objective.push((y_idx(e, k, 1), w)); + } + } + + let target = ILP::new(num_vars, bounds, constraints, objective, ObjectiveSense::Minimize); + + ReductionTSPToILP { + target, + num_vertices: n, + source_edges, + } + } +} + +#[cfg(test)] +#[path = "../unit_tests/rules/travelingsalesman_ilp.rs"] +mod tests; diff --git a/src/unit_tests/rules/travelingsalesman_ilp.rs b/src/unit_tests/rules/travelingsalesman_ilp.rs new file mode 100644 index 000000000..76d4654f6 --- /dev/null +++ b/src/unit_tests/rules/travelingsalesman_ilp.rs @@ -0,0 +1,150 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::SolutionSize; + +#[test] +fn test_reduction_creates_valid_ilp_c4() { + // C4 cycle: 4 vertices, 4 edges. Unique Hamiltonian cycle (the cycle itself). 
+ let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 4, + vec![(0, 1), (1, 2), (2, 3), (3, 0)], + ); + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=4, m=4: num_vars = 16 + 2*4*4 = 48 + assert_eq!(ilp.num_vars, 48); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // All variables should be binary + for bound in &ilp.bounds { + assert_eq!(*bound, VarBounds::binary()); + } +} + +#[test] +fn test_reduction_c4_closed_loop() { + // C4 cycle with unit weights: optimal tour cost = 4 + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 4, + vec![(0, 1), (1, 2), (2, 3), (3, 0)], + ); + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Verify extracted solution is valid on source problem + let metric = problem.evaluate(&extracted); + assert!(metric.is_valid(), "Extracted solution must be valid"); + assert_eq!(metric, SolutionSize::Valid(4)); +} + +#[test] +fn test_reduction_k4_weighted_closed_loop() { + // K4 weighted: find minimum weight Hamiltonian cycle + let problem = TravelingSalesman::<SimpleGraph, i32>::new( + 4, + vec![ + (0, 1, 10), (0, 2, 15), (0, 3, 20), + (1, 2, 35), (1, 3, 25), (2, 3, 30), + ], + ); + + // Solve via ILP reduction + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Solve via brute force for cross-check + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_best(&problem); + let bf_metric = problem.evaluate(&bf_solutions[0]); + let ilp_metric = 
problem.evaluate(&extracted); + + assert!(ilp_metric.is_valid()); + assert_eq!(ilp_metric, bf_metric, "ILP and brute force must agree on optimal cost"); +} + +#[test] +fn test_reduction_c5_unweighted_closed_loop() { + // C5 cycle with unit weights + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 5, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)], + ); + + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + let metric = problem.evaluate(&extracted); + assert!(metric.is_valid()); + assert_eq!(metric, SolutionSize::Valid(5)); +} + +#[test] +fn test_no_hamiltonian_cycle_infeasible() { + // Path graph 0-1-2-3: no Hamiltonian cycle exists + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 4, + vec![(0, 1), (1, 2), (2, 3)], + ); + + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(ilp); + + assert!(result.is_none(), "Path graph should have no Hamiltonian cycle (infeasible ILP)"); +} + +#[test] +fn test_solution_extraction_structure() { + // C4 cycle: verify extraction produces correct edge selection format + let problem = TravelingSalesman::<SimpleGraph, i32>::unweighted( + 4, + vec![(0, 1), (1, 2), (2, 3), (3, 0)], + ); + let reduction: ReductionTSPToILP = ReduceTo::<ILP>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Should have one value per edge + assert_eq!(extracted.len(), 4); + // All edges should be selected (C4 has unique cycle = all edges) + 
assert_eq!(extracted.iter().sum::<usize>(), 4); +} + +#[test] +fn test_solve_reduced() { + // Test via ILPSolver::solve_reduced + let problem = TravelingSalesman::<SimpleGraph, i32>::new( + 4, + vec![ + (0, 1, 10), (0, 2, 15), (0, 3, 20), + (1, 2, 35), (1, 3, 25), (2, 3, 30), + ], + ); + + let ilp_solver = ILPSolver::new(); + let solution = ilp_solver.solve_reduced(&problem).expect("solve_reduced should work"); + + let metric = problem.evaluate(&solution); + assert!(metric.is_valid()); + + // Cross-check with brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_best(&problem); + assert_eq!(metric, problem.evaluate(&bf_solutions[0])); +} diff --git a/tests/suites/examples.rs b/tests/suites/examples.rs index 0a394853e..362546d52 100644 --- a/tests/suites/examples.rs +++ b/tests/suites/examples.rs @@ -40,6 +40,7 @@ example_test!(reduction_satisfiability_to_maximumindependentset); example_test!(reduction_satisfiability_to_minimumdominatingset); example_test!(reduction_spinglass_to_maxcut); example_test!(reduction_spinglass_to_qubo); +example_test!(reduction_travelingsalesman_to_ilp); macro_rules! example_fn { ($test_name:ident, $mod_name:ident) => { @@ -143,3 +144,7 @@ example_fn!( ); example_fn!(test_spinglass_to_maxcut, reduction_spinglass_to_maxcut); example_fn!(test_spinglass_to_qubo, reduction_spinglass_to_qubo); +example_fn!( + test_travelingsalesman_to_ilp, + reduction_travelingsalesman_to_ilp +);