diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index 32bf3d3d..78094db3 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -149,6 +149,13 @@ See Key Patterns above for solver API signatures. Follow the reference files for Unit tests in `src/unit_tests/` linked via `#[path]` (see Core Modules above). Integration tests in `tests/suites/`, consolidated through `tests/main.rs`. Example tests in `tests/suites/examples.rs` using `include!` for direct invocation. +## Documentation Locations +- `README.md` — Project overview and quickstart +- `.claude/` — Claude Code instructions and skills +- `docs/book/` — mdBook user documentation (built with `make doc`) +- `docs/paper/reductions.typ` — Typst paper with problem definitions and reduction theorems +- `examples/` — Reduction example code (also used in paper and tests) + ## Documentation Requirements **Reference:** search `docs/paper/reductions.typ` for `MinimumVertexCover` `MaximumIndependentSet` to see a complete problem-def + reduction-rule example. 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9d2fa032..2029f01d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,7 +22,7 @@ jobs: components: clippy - uses: Swatinem/rust-cache@v2 - name: Run clippy - run: cargo clippy --all-targets --all-features -- -D warnings + run: cargo clippy --all-targets --features ilp-highs -- -D warnings # Build and test test: @@ -34,13 +34,13 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build - run: cargo build --all-features --verbose + run: cargo build --features ilp-highs --verbose - name: Run tests - run: cargo test --all-features --verbose + run: cargo test --features ilp-highs --verbose - name: Run doc tests - run: cargo test --doc --all-features --verbose + run: cargo test --doc --features ilp-highs --verbose # Code coverage coverage: @@ -55,7 +55,7 @@ jobs: - name: Install cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov - name: Generate coverage - run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info + run: cargo llvm-cov --features ilp-highs --workspace --lcov --output-path lcov.info - name: Upload to codecov.io uses: codecov/codecov-action@v4 with: diff --git a/Cargo.toml b/Cargo.toml index c0adcf6d..1ce417b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = [".", "problemreductions-macros"] +members = [".", "problemreductions-macros", "problemreductions-cli"] [package] name = "problemreductions" @@ -12,8 +12,15 @@ keywords = ["np-hard", "optimization", "reduction", "sat", "graph"] categories = ["algorithms", "science"] [features] -default = ["ilp"] -ilp = ["good_lp"] +default = ["ilp-highs"] +ilp = ["ilp-highs"] # backward compat shorthand +ilp-solver = [] # marker: enables ILP solver code +ilp-highs = ["ilp-solver", "dep:good_lp", "good_lp/highs"] +ilp-coin-cbc = ["ilp-solver", "dep:good_lp", "good_lp/coin_cbc"] +ilp-clarabel = ["ilp-solver", "dep:good_lp", "good_lp/clarabel"] +ilp-scip = ["ilp-solver", 
"dep:good_lp", "good_lp/scip"] +ilp-lpsolve = ["ilp-solver", "dep:good_lp", "good_lp/lpsolve"] +ilp-microlp = ["ilp-solver", "dep:good_lp", "good_lp/microlp"] [dependencies] petgraph = { version = "0.8", features = ["serde-1"] } @@ -22,7 +29,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "2.0" num-traits = "0.2" -good_lp = { version = "1.8", default-features = false, features = ["highs"], optional = true } +good_lp = { version = "1.8", default-features = false, optional = true } inventory = "0.3" ordered-float = "5.0" rand = "0.9" diff --git a/Makefile b/Makefile index ba8af969..50dc838a 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # Makefile for problemreductions -.PHONY: help build test fmt clippy doc mdbook paper examples clean coverage rust-export compare qubo-testdata export-schemas release run-plan diagrams jl-testdata +.PHONY: help build test fmt clippy doc mdbook paper examples clean coverage rust-export compare qubo-testdata export-schemas release run-plan diagrams jl-testdata cli # Default target help: @@ -24,15 +24,16 @@ help: @echo " qubo-testdata - Regenerate QUBO test data (requires uv)" @echo " jl-testdata - Regenerate Julia parity test data (requires julia)" @echo " release V=x.y.z - Tag and push a new release (triggers CI publish)" + @echo " cli - Build the pred CLI tool" @echo " run-plan - Execute a plan with Claude autorun (latest plan in docs/plans/)" # Build the project build: - cargo build --all-features + cargo build --features ilp-highs # Run all tests (including ignored tests) test: - cargo test --all-features -- --include-ignored + cargo test --features ilp-highs -- --include-ignored # Format code fmt: @@ -44,14 +45,14 @@ fmt-check: # Run clippy clippy: - cargo clippy --all-targets --all-features -- -D warnings + cargo clippy --all-targets --features ilp-highs -- -D warnings # Build mdBook documentation doc: cargo run --example export_graph cargo run --example export_schemas mdbook build docs 
- RUSTDOCFLAGS="--default-theme=dark" cargo doc --all-features --no-deps + RUSTDOCFLAGS="--default-theme=dark" cargo doc --features ilp-highs --no-deps rm -rf docs/book/api cp -r target/doc docs/book/api @@ -70,7 +71,7 @@ diagrams: mdbook: cargo run --example export_graph cargo run --example export_schemas - RUSTDOCFLAGS="--default-theme=dark" cargo doc --all-features --no-deps + RUSTDOCFLAGS="--default-theme=dark" cargo doc --features ilp-highs --no-deps mdbook build rm -rf book/api cp -r target/doc book/api @@ -85,9 +86,9 @@ examples: @mkdir -p docs/paper/examples @for example in $(REDUCTION_EXAMPLES); do \ echo "Running $$example..."; \ - cargo run --all-features --example $$example || exit 1; \ + cargo run --features ilp-highs --example $$example || exit 1; \ done - cargo run --all-features --example export_petersen_mapping + cargo run --features ilp-highs --example export_petersen_mapping # Export problem schemas to JSON export-schemas: @@ -102,7 +103,7 @@ paper: examples # Generate coverage report (requires: cargo install cargo-llvm-cov) coverage: @command -v cargo-llvm-cov >/dev/null 2>&1 || { echo "Installing cargo-llvm-cov..."; cargo install cargo-llvm-cov; } - cargo llvm-cov --all-features --workspace --html --open + cargo llvm-cov --features ilp-highs --workspace --html --open # Clean build artifacts clean: @@ -135,6 +136,10 @@ endif git push origin main --tags @echo "v$(V) pushed — CI will publish to crates.io" +# Build the pred CLI tool +cli: + cargo build -p problemreductions-cli --release + # Generate Rust mapping JSON exports for all graphs and modes GRAPHS := diamond bull house petersen MODES := unweighted weighted triangular diff --git a/README.md b/README.md index 52a44c9b..d841c5a4 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,8 @@ Download [PDF manual](https://codingthrust.github.io/problem-reductions/reductio ## Installation +### As a library + Add to your `Cargo.toml`: ```toml @@ -22,7 +24,23 @@ Add to your `Cargo.toml`: 
problemreductions = "0.2" ``` -See the [Getting Started](https://codingthrust.github.io/problem-reductions/getting-started.html) guide for usage examples and the reduction workflow. +### CLI tool + +Install the `pred` command-line tool for exploring the reduction graph from your terminal: + +```bash +cargo install --git https://github.com/CodingThrust/problem-reductions problemreductions-cli +``` + +Or build from source: + +```bash +git clone https://github.com/CodingThrust/problem-reductions +cd problem-reductions +make cli # builds target/release/pred +``` + +See the [Getting Started](https://codingthrust.github.io/problem-reductions/getting-started.html) guide for usage examples, the reduction workflow, and [CLI usage](https://codingthrust.github.io/problem-reductions/cli.html). ## Contributing diff --git a/codecov.yml b/codecov.yml index 9e3e35a1..27c6f0cd 100644 --- a/codecov.yml +++ b/codecov.yml @@ -17,6 +17,10 @@ coverage: threshold: 2% # Exclude proc-macro crate from coverage since it runs at compile time -# and traditional runtime coverage tools cannot measure it +# and traditional runtime coverage tools cannot measure it. +# Exclude CLI crate from patch coverage: it is a thin dispatch layer +# with many boilerplate type-dispatch match arms that are tested +# end-to-end via integration tests, not amenable to 95% line coverage. ignore: - "problemreductions-macros/**/*" + - "problemreductions-cli/**/*" diff --git a/docs/plans/2026-02-18-cli-tool-design.md b/docs/plans/2026-02-18-cli-tool-design.md new file mode 100644 index 00000000..b17f81aa --- /dev/null +++ b/docs/plans/2026-02-18-cli-tool-design.md @@ -0,0 +1,152 @@ +# CLI Tool Design: `pred` + +## Overview + +A command-line tool for researchers and students to explore NP-hard problem reductions and solve problem instances without writing Rust code. Implemented as a separate workspace crate (`problemreductions-cli`), binary name `pred`. 
+ +## Audience + +Researchers and students studying NP-hard reductions who want to explore and visualize without writing any Rust code. + +## Command Structure + +Subcommand-based CLI with two top-level groups: `graph` (exploration) and solve/reduce/evaluate (computation). + +### Graph Exploration + +``` +pred graph list # List all registered problems +pred graph show <problem> # Show problem details, variants, reductions +pred graph show MIS --variants # List all variants +pred graph path <source> <target> # Find cheapest reduction path +pred graph path MIS QUBO --cost minimize:num_vars # Custom cost function +pred graph export [--output path] # Export reduction_graph.json +``` + +### Computation + +``` +pred solve <input> --via <target> # Reduce + solve + map solution back +pred solve --problem MIS --edges 0-1,1-2 --via QUBO # Inline input +pred reduce <input> --to <target> # Reduce only, output target as JSON +pred evaluate <input> --config 1,0,1 # Evaluate a configuration +pred schema <problem> # Show JSON schema for a problem type +``` + +### Global Flags + +- `--json` — structured JSON output, saved to file (default filename derived from command) +- `--output <path>` — custom output file path (used with `--json`) +- `--help` / `-h` — per-command help + +## Problem Name Resolution + +Case-insensitive matching with common aliases: + +| Input | Resolves to | +|-------|-------------| +| `MIS`, `mis` | `MaximumIndependentSet` | +| `MVC` | `MinimumVertexCover` | +| `SAT` | `Satisfiability` | +| `3SAT` | `KSatisfiability` (K=3) | +| `QUBO` | `QUBO` | +| `MaxCut` | `MaxCut` | + +Unambiguous prefix matching: `MaximumI` → `MaximumIndependentSet`, but `Maximum` is rejected (ambiguous). + +## Variant Syntax + +Slash-based positional notation after the problem name. Order follows `Problem::variant()` key order. Partial specification fills from the left; no skipping. 
+ +``` +MIS → defaults (SimpleGraph, One) +MIS/UnitDiskGraph → UnitDiskGraph, default weight +MIS/SimpleGraph/f64 → must spell out graph to set weight +KColoring/K3 → SimpleGraph, K=3 +3SAT → alias for KSatisfiability/K3 +QUBO → no variants +``` + +## Input Formats + +### JSON Files + +Reuses the library's existing serde serialization: + +```json +{ + "problem": "MaximumIndependentSet", + "graph": {"edges": [[0,1], [1,2], [2,0]], "num_vertices": 3}, + "weights": [1, 1, 1] +} +``` + +### Inline Arguments + +For simple cases without a JSON file: + +``` +pred solve --problem MIS --edges 0-1,1-2,2-0 --weights 1,1,1 --via QUBO +``` + +## Output + +- **Human-readable (default):** plain text to stdout +- **`--json`:** structured JSON saved to file (default name derived from command, e.g., `pred_path_MIS_QUBO.json`) +- **`--json --output custom.json`:** custom output path +- **Errors:** always to stderr +- **Exit codes:** non-zero on any error + +## Architecture + +### Crate Layout + +Separate workspace crate: `problemreductions-cli/` + +``` +src/ + main.rs # Cli::parse(), dispatch to commands + cli.rs # Clap derive structs (Cli, Commands, GraphCommands) + commands/ + graph.rs # list, show, path, export + solve.rs # reduce + solve + extract solution + reduce.rs # reduce only, output target problem + evaluate.rs # evaluate a config + schema.rs # show JSON schema for a problem type + output.rs # OutputMode enum, write_json_file(), print_human() + problem_name.rs # Alias resolution + variant parsing (slash notation) +``` + +### Dependencies + +- `clap` (derive) — argument parsing +- `anyhow` — error handling +- `serde_json` — JSON I/O +- `problemreductions` — the library (all features) + +### Dynamic Dispatch + +- **Graph commands:** use `ReductionGraph` directly — already works with string names +- **Solve/reduce/evaluate:** dispatch table — a `match` over known problem names that constructs concrete types from JSON. ~20 match arms, one per problem type. 
+ +### Error Handling + +`anyhow::Result` throughout, with `.context()` for actionable error messages. Non-zero exit code on any error. + +## V1 Scope + +### In scope + +- `pred graph list` +- `pred graph show ` (with `--variants`) +- `pred graph path ` (with `--cost`) +- `pred graph export` +- `pred solve` (JSON file and inline input, brute-force solver) +- `pred reduce` (reduce only) +- `pred evaluate` +- `pred schema` +- `--json` output to file + +### Out of scope (v2+) + +See GitHub issue for future plans. diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index f04c36c3..e77ea9d5 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -5,6 +5,7 @@ # User Guide - [Getting Started](./getting-started.md) +- [CLI Tool](./cli.md) - [Design](./design.md) # Developer Guide diff --git a/docs/src/cli.md b/docs/src/cli.md new file mode 100644 index 00000000..744f115b --- /dev/null +++ b/docs/src/cli.md @@ -0,0 +1,301 @@ +# CLI Tool + +The `pred` command-line tool lets you explore the reduction graph, create problem instances, solve problems, and perform reductions — all from your terminal. + +## Installation + +Install from the repository: + +```bash +cargo install --git https://github.com/CodingThrust/problem-reductions problemreductions-cli +``` + +Or build from source: + +```bash +git clone https://github.com/CodingThrust/problem-reductions +cd problem-reductions +make cli # builds target/release/pred +``` + +Verify the installation: + +```bash +pred --version +``` + +### ILP Backend + +The default ILP backend is HiGHS. To use a different backend: + +```bash +cargo install problemreductions-cli --features coin-cbc +cargo install problemreductions-cli --features scip +cargo install problemreductions-cli --no-default-features --features clarabel +``` + +Available backends: `highs` (default), `coin-cbc`, `clarabel`, `scip`, `lpsolve`, `microlp`. 
+ +## Quick Start + +```bash +# Create a Maximum Independent Set problem +pred create MIS --edges 0-1,1-2,2-3 -o problem.json + +# Solve it (auto-reduces to ILP) +pred solve problem.json + +# Or solve with brute-force +pred solve problem.json --solver brute-force + +# Evaluate a specific configuration +pred evaluate problem.json --config 1,0,1,0 + +# Reduce to another problem type and solve via brute-force +pred reduce problem.json --to QUBO -o reduced.json +pred solve reduced.json --solver brute-force +``` + +## Commands + +### `pred list` — List all problem types + +Lists all registered problem types with their short aliases. + +```bash +$ pred list +Registered problems: 17 types, 48 reductions, 25 variant nodes + + CircuitSAT + Factoring + ILP + KColoring + KSatisfiability (3SAT, KSAT) + MaxCut + MaximumClique + MaximumIndependentSet (MIS) + MaximumMatching + MaximumSetPacking + MinimumDominatingSet + MinimumSetCovering + MinimumVertexCover (MVC) + QUBO + Satisfiability (SAT) + SpinGlass + TravelingSalesman (TSP) + +Use `pred show <problem>` to see variants, reductions, and fields. +``` + +### `pred show` — Inspect a problem + +Show variants, fields, size fields, and reductions for a problem type. Use short aliases like `MIS` for `MaximumIndependentSet`. + +```bash +$ pred show MIS +MaximumIndependentSet + Find maximum weight independent set in a graph + +Variants (4): + {graph=SimpleGraph, weight=i32} + {graph=UnitDiskGraph, weight=i32} + {graph=KingsSubgraph, weight=i32} + {graph=TriangularSubgraph, weight=i32} + +Fields (2): + graph (G) -- The underlying graph G=(V,E) + weights (Vec<W>) -- Vertex weights w: V -> R + +Size fields (2): + num_vertices + num_edges + +Reduces to (10): + MaximumIndependentSet {graph=SimpleGraph, weight=i32} -> MinimumVertexCover ... + MaximumIndependentSet {graph=SimpleGraph, weight=i32} -> ILP (default) + MaximumIndependentSet {graph=SimpleGraph, weight=i32} -> QUBO {weight=f64} + ... 
+ +Reduces from (9): + MinimumVertexCover {graph=SimpleGraph, weight=i32} -> MaximumIndependentSet ... + Satisfiability (default) -> MaximumIndependentSet {graph=SimpleGraph, weight=i32} + ... +``` + +### `pred path` — Find a reduction path + +Find the cheapest chain of reductions between two problems: + +```bash +$ pred path MIS QUBO +Path (1 steps): MaximumIndependentSet ... → QUBO {weight: "f64"} + + Step 1: MaximumIndependentSet {graph: "SimpleGraph", weight: "i32"} → QUBO {weight: "f64"} + num_vars = num_vertices +``` + +Multi-step paths are discovered automatically: + +```bash +$ pred path Factoring SpinGlass +Path (2 steps): Factoring → CircuitSAT → SpinGlass {graph: "SimpleGraph", weight: "i32"} + + Step 1: Factoring → CircuitSAT + num_variables = num_bits_first * num_bits_second + num_assignments = num_bits_first * num_bits_second + + Step 2: CircuitSAT → SpinGlass {graph: "SimpleGraph", weight: "i32"} + num_spins = num_assignments + num_interactions = num_assignments +``` + +Show all paths or save for later use with `pred reduce --via`: + +```bash +pred path MIS QUBO --all # all paths +pred path MIS QUBO -o path.json # save path for `pred reduce --via` +pred path MIS QUBO --all -o paths/ # save all paths to a folder +``` + +Use `--cost` to change the optimization strategy: + +```bash +pred path MIS QUBO --cost minimize-steps # default +pred path MIS QUBO --cost minimize:num_variables # minimize a size field +``` + +Use `pred show <problem>` to see which size fields are available. 
+ +### `pred export-graph` — Export the reduction graph + +Export the full reduction graph as JSON: + +```bash +pred export-graph reduction_graph.json +``` + +### `pred create` — Create a problem instance + +Construct a problem instance from CLI arguments and save as JSON: + +```bash +pred create MIS --edges 0-1,1-2,2-3 -o problem.json +pred create MIS --edges 0-1,1-2,2-3 --weights 2,1,3,1 -o problem.json +pred create SAT --num-vars 3 --clauses "1,2;-1,3" -o sat.json +pred create QUBO --matrix "1,0.5;0.5,2" -o qubo.json +pred create KColoring --k 3 --edges 0-1,1-2,2-0 -o kcol.json +pred create SpinGlass --edges 0-1,1-2 -o sg.json +pred create MaxCut --edges 0-1,1-2,2-0 -o maxcut.json +``` + +The output file uses a standard wrapper format: + +```json +{ + "type": "MaximumIndependentSet", + "variant": {"graph": "SimpleGraph", "weight": "i32"}, + "data": { ... } +} +``` + +### `pred evaluate` — Evaluate a configuration + +Evaluate a configuration against a problem instance: + +```bash +$ pred evaluate problem.json --config 1,0,1,0 +Valid(2) +``` + +### `pred reduce` — Reduce a problem + +Reduce a problem to a target type. 
Outputs a reduction bundle containing source, target, and path: + +```bash +pred reduce problem.json --to QUBO -o reduced.json +``` + +Use a specific reduction path (from `pred path -o`): + +```bash +pred reduce problem.json --to QUBO --via path.json -o reduced.json +``` + +Without `-o`, the bundle JSON is printed to stdout: + +```bash +pred reduce problem.json --to QUBO +``` + +The bundle contains everything needed to map solutions back: + +```json +{ + "source": { "type": "MaximumIndependentSet", "variant": {...}, "data": {...} }, + "target": { "type": "QUBO", "variant": {...}, "data": {...} }, + "path": [ + {"name": "MaximumIndependentSet", "variant": {"graph": "SimpleGraph", "weight": "i32"}}, + {"name": "QUBO", "variant": {"weight": "f64"}} + ] +} +``` + +### `pred solve` — Solve a problem + +Solve a problem instance using ILP (default) or brute-force: + +```bash +pred solve problem.json # ILP solver (default) +pred solve problem.json --solver brute-force # brute-force solver +``` + +When the problem is not ILP, the solver automatically reduces it to ILP, solves, and maps the solution back: + +```bash +$ pred solve problem.json +Problem: MaximumIndependentSet (reduced to ILP) +Solver: ilp +Solution: [1, 0, 0, 1] +Evaluation: Valid(2) +``` + +Solve a reduction bundle (from `pred reduce`): + +```bash +$ pred solve reduced.json --solver brute-force +Source: MaximumIndependentSet +Target: QUBO (solved with brute-force) +Target solution: [0, 1, 0, 1] +Target evaluation: Valid(-2.0) +Source solution: [0, 1, 0, 1] +Source evaluation: Valid(2) +``` + +> **Note:** The ILP solver requires a reduction path from the target problem to ILP. +> Some problems (e.g., QUBO, SpinGlass, MaxCut, CircuitSAT) do not have this path yet. +> Use `--solver brute-force` for these, or reduce to a problem that supports ILP first. 
+ +## JSON Output + +All commands support `-o` to write JSON output to a file: + +```bash +pred list -o problems.json +pred show MIS -o mis.json +pred path MIS QUBO -o path.json +pred solve problem.json -o solution.json +``` + +## Problem Name Aliases + +You can use short aliases instead of full problem names (shown in `pred list`): + +| Alias | Full Name | +|-------|-----------| +| `MIS` | `MaximumIndependentSet` | +| `MVC` | `MinimumVertexCover` | +| `SAT` | `Satisfiability` | +| `3SAT` / `KSAT` | `KSatisfiability` | +| `TSP` | `TravelingSalesman` | + +You can also specify variants with a slash: `MIS/UnitDiskGraph`, `SpinGlass/SimpleGraph`. diff --git a/docs/src/getting-started.md b/docs/src/getting-started.md index ebd577b5..49e502b5 100644 --- a/docs/src/getting-started.md +++ b/docs/src/getting-started.md @@ -212,6 +212,7 @@ The library exports machine-readable metadata useful for tooling and research: ## Next Steps +- Try the [CLI tool](./cli.md) to explore problems and reduction paths from your terminal - Explore the [interactive reduction graph](./introduction.html) to discover available reductions - Read the [Design](./design.md) guide for implementation details - Browse the [API Reference](./api.html) for full documentation diff --git a/docs/src/introduction.md b/docs/src/introduction.md index 2765bf7b..42b49d4e 100644 --- a/docs/src/introduction.md +++ b/docs/src/introduction.md @@ -34,7 +34,7 @@
-For theoretical background and correctness proofs, see the [PDF manual](https://codingthrust.github.io/problem-reductions/reductions.pdf). +You can also explore this graph from the terminal with the [CLI tool](./cli.md). For theoretical background and correctness proofs, see the [PDF manual](https://codingthrust.github.io/problem-reductions/reductions.pdf). ## Our Vision diff --git a/examples/chained_reduction_ksat_to_mis.rs b/examples/chained_reduction_ksat_to_mis.rs index 9f662358..d253cd21 100644 --- a/examples/chained_reduction_ksat_to_mis.rs +++ b/examples/chained_reduction_ksat_to_mis.rs @@ -1,6 +1,6 @@ -// # Chained Reduction: 3-SAT → MIS via Executable Paths +// # Chained Reduction: 3-SAT → MIS via Reduction Chains // -// Demonstrates the `find_cheapest_path` + `make_executable` API to chain +// Demonstrates the `find_cheapest_path` + `reduce_along_path` API to chain // reductions automatically: KSatisfiability → Satisfiability → MIS. // The target MIS is then solved via `ILPSolver::solve_reduced`. 
@@ -31,9 +31,6 @@ pub fn run() { &MinimizeSteps, ) .unwrap(); - let path = graph - .make_executable::, MaximumIndependentSet>(&rpath) - .unwrap(); // Create: 3-SAT formula (a∨b∨¬c)∧(¬a∨¬b∨¬c)∧(¬a∨b∨c)∧(a∨¬b∨c) let ksat = KSatisfiability::::new( @@ -46,14 +43,16 @@ pub fn run() { ], ); - // Reduce: the executable path handles all intermediate steps - let reduction = path.reduce(&ksat); - let target = reduction.target_problem(); + // Reduce: the reduction chain handles all intermediate steps + let chain = graph + .reduce_along_path(&rpath, &ksat as &dyn std::any::Any) + .unwrap(); + let target: &MaximumIndependentSet = chain.target_problem(); // Solve the target MIS via ILP let solver = ILPSolver::new(); let solution = solver.solve_reduced(target).unwrap(); - let original = reduction.extract_solution(&solution); + let original = chain.extract_solution(&solution); // Verify: satisfies the original 3-SAT formula assert!(ksat.evaluate(&original)); diff --git a/problemreductions-cli/Cargo.toml b/problemreductions-cli/Cargo.toml new file mode 100644 index 00000000..dffcf5d2 --- /dev/null +++ b/problemreductions-cli/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "problemreductions-cli" +version = "0.1.0" +edition = "2021" +description = "CLI tool for exploring NP-hard problem reductions" +license = "MIT" + +[[bin]] +name = "pred" +path = "src/main.rs" + +[features] +default = ["highs"] +highs = ["problemreductions/ilp-highs"] +coin-cbc = ["problemreductions/ilp-coin-cbc"] +clarabel = ["problemreductions/ilp-clarabel"] +scip = ["problemreductions/ilp-scip"] +lpsolve = ["problemreductions/ilp-lpsolve"] +microlp = ["problemreductions/ilp-microlp"] + +[dependencies] +problemreductions = { path = "..", default-features = false } +clap = { version = "4", features = ["derive"] } +anyhow = "1" +serde = { version = "1", features = ["derive"] } +serde_json = "1" diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs new file mode 100644 index 
00000000..724859e4 --- /dev/null +++ b/problemreductions-cli/src/cli.rs @@ -0,0 +1,229 @@ +use clap::{CommandFactory, Parser, Subcommand}; +use std::path::PathBuf; + +#[derive(Parser)] +#[command( + name = "pred", + about = "Explore NP-hard problem reductions", + version, + after_help = "\ +Typical workflow: + pred create MIS --edges 0-1,1-2,2-3 -o problem.json + pred solve problem.json + pred evaluate problem.json --config 1,0,1,0 + +Use `pred --help` for detailed usage of each command. +Use `pred list` to see all available problem types." +)] +pub struct Cli { + /// Output file path (implies JSON output) + #[arg(long, short, global = true)] + pub output: Option, + + #[command(subcommand)] + pub command: Commands, +} + +#[derive(Subcommand)] +pub enum Commands { + /// List all registered problem types + #[command(after_help = "\ +Examples: + pred list # print to terminal + pred list -o problems.json # save as JSON")] + List, + + /// Show details for a problem type (variants, fields, reductions) + #[command(after_help = "\ +Examples: + pred show MIS # using alias + pred show MaximumIndependentSet # full name + pred show MIS/UnitDiskGraph # specific graph variant + +Use `pred list` to see all available problem types and aliases.")] + Show { + /// Problem name or alias (e.g., MIS, QUBO, MIS/UnitDiskGraph) + problem: String, + }, + + /// Find the cheapest reduction path between two problems + #[command(after_help = "\ +Examples: + pred path MIS QUBO # cheapest path + pred path MIS QUBO --all # all paths + pred path MIS QUBO -o path.json # save for `pred reduce --via` + pred path MIS QUBO --all -o paths/ # save all paths to a folder + pred path MIS QUBO --cost minimize:num_variables + +Use `pred list` to see available problems.")] + Path { + /// Source problem (e.g., MIS, MIS/UnitDiskGraph) + source: String, + /// Target problem (e.g., QUBO) + target: String, + /// Cost function [default: minimize-steps] + #[arg(long, default_value = "minimize-steps")] + cost: String, 
+ /// Show all paths instead of just the cheapest + #[arg(long)] + all: bool, + }, + + /// Export the reduction graph to JSON + #[command(after_help = "\ +Example: + pred export-graph reduction_graph.json")] + ExportGraph { + /// Output file path + output: PathBuf, + }, + + /// Create a problem instance and save as JSON + Create(CreateArgs), + /// Evaluate a configuration against a problem instance JSON file + Evaluate(EvaluateArgs), + /// Reduce a problem instance to a target type + Reduce(ReduceArgs), + /// Solve a problem instance + Solve(SolveArgs), +} + +#[derive(clap::Args)] +#[command(after_help = "\ +Options by problem type: + Graph problems (MIS, MVC, MaxCut, MaxClique, ...): + --edges Edge list, e.g., 0-1,1-2,2-3 [required] + --weights Vertex weights, e.g., 2,1,3,1 [default: all 1s] + SAT problems (SAT, 3SAT, KSAT): + --num-vars Number of variables [required] + --clauses Semicolon-separated clauses, e.g., \"1,2;-1,3\" [required] + QUBO: + --matrix Semicolon-separated rows, e.g., \"1,0.5;0.5,2\" [required] + KColoring: + --edges Edge list [required] + --k Number of colors [required] + +Examples: + pred create MIS --edges 0-1,1-2,2-3 -o problem.json + pred create MIS --edges 0-1,1-2 --weights 2,1,3 -o weighted.json + pred create SAT --num-vars 3 --clauses \"1,2;-1,3\" -o sat.json + pred create QUBO --matrix \"1,0.5;0.5,2\" -o qubo.json + pred create KColoring --k 3 --edges 0-1,1-2,2-0 -o kcol.json + +Output (`-o`) uses the standard problem JSON format: + {\"type\": \"...\", \"variant\": {...}, \"data\": {...}}")] +pub struct CreateArgs { + /// Problem type (e.g., MIS, QUBO, SAT) + pub problem: String, + /// Edges for graph problems (e.g., 0-1,1-2,2-3) + #[arg(long)] + pub edges: Option, + /// Vertex weights (e.g., 1,1,1,1) [default: all 1s] + #[arg(long)] + pub weights: Option, + /// Clauses for SAT problems (semicolon-separated, e.g., "1,2;-1,3") + #[arg(long)] + pub clauses: Option, + /// Number of variables (for SAT/KSAT) + #[arg(long)] + pub num_vars: 
Option, + /// Matrix for QUBO (semicolon-separated rows, e.g., "1,0.5;0.5,2") + #[arg(long)] + pub matrix: Option, + /// Number of colors for KColoring + #[arg(long)] + pub k: Option, +} + +#[derive(clap::Args)] +#[command(after_help = "\ +Examples: + pred solve problem.json # ILP solver (default, auto-reduces to ILP) + pred solve problem.json --solver brute-force # brute-force (exhaustive search) + pred solve reduced.json # solve a reduction bundle + pred solve reduced.json -o solution.json # save result to file + +Typical workflow: + pred create MIS --edges 0-1,1-2,2-3 -o problem.json + pred solve problem.json + +Solve via explicit reduction: + pred reduce problem.json --to QUBO -o reduced.json + pred solve reduced.json + +Input: a problem JSON from `pred create`, or a reduction bundle from `pred reduce`. +When given a bundle, the target is solved and the solution is mapped back to the source. +The ILP solver auto-reduces non-ILP problems before solving. + +ILP backend (default: HiGHS). To use a different backend: + cargo install problemreductions-cli --features coin-cbc + cargo install problemreductions-cli --features scip + cargo install problemreductions-cli --no-default-features --features clarabel")] +pub struct SolveArgs { + /// Problem JSON file (from `pred create`) or reduction bundle (from `pred reduce`) + pub input: PathBuf, + /// Solver: ilp (default) or brute-force + #[arg(long, default_value = "ilp")] + pub solver: String, +} + +#[derive(clap::Args)] +#[command(after_help = "\ +Examples: + pred reduce problem.json --to QUBO -o reduced.json + pred reduce problem.json --to ILP -o reduced.json + pred reduce problem.json --to QUBO --via path.json -o reduced.json + +Input: a problem JSON from `pred create`. +The --via path file is from `pred path -o path.json`. +Output is a reduction bundle with source, target, and path. 
+Use `pred solve reduced.json` to solve and map the solution back.")] +pub struct ReduceArgs { + /// Problem JSON file (from `pred create`) + pub input: PathBuf, + /// Target problem type (e.g., QUBO, SpinGlass) + #[arg(long)] + pub to: String, + /// Reduction route file (from `pred path ... -o`) + #[arg(long)] + pub via: Option, +} + +#[derive(clap::Args)] +#[command(after_help = "\ +Examples: + pred evaluate problem.json --config 1,0,1,0 + pred evaluate problem.json --config 1,0,1,0 -o result.json + +Input: a problem JSON from `pred create`.")] +pub struct EvaluateArgs { + /// Problem JSON file (from `pred create`) + pub input: PathBuf, + /// Configuration to evaluate (comma-separated, e.g., 1,0,1,0) + #[arg(long)] + pub config: String, +} + +/// Print the after_help text for a subcommand on parse error. +pub fn print_subcommand_help_hint(error_msg: &str) { + let subcmds = [ + ("pred solve", "solve"), + ("pred reduce", "reduce"), + ("pred create", "create"), + ("pred evaluate", "evaluate"), + ("pred path", "path"), + ("pred show", "show"), + ("pred export-graph", "export-graph"), + ]; + let cmd = Cli::command(); + for (pattern, name) in subcmds { + if error_msg.contains(pattern) { + if let Some(sub) = cmd.find_subcommand(name) { + if let Some(help) = sub.get_after_help() { + eprintln!("\n{help}"); + } + } + return; + } + } +} diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs new file mode 100644 index 00000000..d8c044f5 --- /dev/null +++ b/problemreductions-cli/src/commands/create.rs @@ -0,0 +1,303 @@ +use crate::cli::CreateArgs; +use crate::dispatch::ProblemJsonOutput; +use crate::output::OutputConfig; +use crate::problem_name::resolve_alias; +use anyhow::{bail, Result}; +use problemreductions::prelude::*; +use problemreductions::topology::{Graph, SimpleGraph}; +use problemreductions::variant::{K2, K3, KN}; +use serde::Serialize; +use std::collections::BTreeMap; + +pub fn create(args: &CreateArgs, out: 
&OutputConfig) -> Result<()> { + let canonical = resolve_alias(&args.problem); + + let (data, variant) = match canonical.as_str() { + // Graph problems with vertex weights + "MaximumIndependentSet" | "MinimumVertexCover" | "MaximumClique" + | "MinimumDominatingSet" => { + let (graph, n) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create {} --edges 0-1,1-2,2-3 [--weights 1,1,1,1] --json -o problem.json", + args.problem + ) + })?; + let weights = parse_vertex_weights(args, n)?; + let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + let data = match canonical.as_str() { + "MaximumIndependentSet" => { + ser(MaximumIndependentSet::new(graph, weights))? + } + "MinimumVertexCover" => { + ser(MinimumVertexCover::new(graph, weights))? + } + "MaximumClique" => ser(MaximumClique::new(graph, weights))?, + "MinimumDominatingSet" => { + ser(MinimumDominatingSet::new(graph, weights))? + } + _ => unreachable!(), + }; + (data, variant) + } + + // Graph problems with edge weights + "MaxCut" | "MaximumMatching" | "TravelingSalesman" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create {} --edges 0-1,1-2,2-3 [--weights 1,1,1] --json -o problem.json", + args.problem + ) + })?; + let edge_weights = parse_edge_weights(args, graph.num_edges())?; + let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + let data = match canonical.as_str() { + "MaxCut" => ser(MaxCut::new(graph, edge_weights))?, + "MaximumMatching" => ser(MaximumMatching::new(graph, edge_weights))?, + "TravelingSalesman" => ser(TravelingSalesman::new(graph, edge_weights))?, + _ => unreachable!(), + }; + (data, variant) + } + + // KColoring + "KColoring" => { + let (graph, _) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create KColoring --edges 0-1,1-2,2-0 --k 3 --json -o problem.json" + ) + })?; + let variant; + let data; + match args.k { + Some(2) => { + variant = 
variant_map(&[("k", "K2"), ("graph", "SimpleGraph")]); + data = ser(KColoring::::new(graph))?; + } + Some(3) => { + variant = variant_map(&[("k", "K3"), ("graph", "SimpleGraph")]); + data = ser(KColoring::::new(graph))?; + } + Some(k) => { + variant = variant_map(&[("k", "KN"), ("graph", "SimpleGraph")]); + data = ser(KColoring::::with_k(graph, k))?; + } + None => bail!( + "KColoring requires --k \n\n\ + Usage: pred create KColoring --edges 0-1,1-2,2-0 --k 3 --json -o problem.json" + ), + } + (data, variant) + } + + // SAT + "Satisfiability" => { + let num_vars = args.num_vars.ok_or_else(|| { + anyhow::anyhow!( + "Satisfiability requires --num-vars\n\n\ + Usage: pred create SAT --num-vars 3 --clauses \"1,2;-1,3\" --json -o problem.json" + ) + })?; + let clauses = parse_clauses(args)?; + let variant = BTreeMap::new(); + (ser(Satisfiability::new(num_vars, clauses))?, variant) + } + "KSatisfiability" => { + let num_vars = args.num_vars.ok_or_else(|| { + anyhow::anyhow!( + "KSatisfiability requires --num-vars\n\n\ + Usage: pred create 3SAT --num-vars 3 --clauses \"1,2,3;-1,2,-3\" --json -o problem.json" + ) + })?; + let clauses = parse_clauses(args)?; + let variant; + let data; + match args.k { + Some(2) => { + variant = variant_map(&[("k", "K2")]); + data = ser(KSatisfiability::::new(num_vars, clauses))?; + } + Some(3) => { + variant = variant_map(&[("k", "K3")]); + data = ser(KSatisfiability::::new(num_vars, clauses))?; + } + _ => { + variant = variant_map(&[("k", "KN")]); + data = ser(KSatisfiability::::new(num_vars, clauses))?; + } + } + (data, variant) + } + + // QUBO + "QUBO" => { + let matrix = parse_matrix(args)?; + let variant = BTreeMap::new(); + (ser(QUBO::from_matrix(matrix))?, variant) + } + + // SpinGlass + "SpinGlass" => { + let (graph, n) = parse_graph(args).map_err(|e| { + anyhow::anyhow!( + "{e}\n\nUsage: pred create SpinGlass --edges 0-1,1-2 [--weights 1,1] --json -o problem.json" + ) + })?; + let edge_weights = parse_edge_weights(args, 
graph.num_edges())?; + let fields = vec![0i32; n]; + let couplings = edge_weights; + let variant = variant_map(&[("graph", "SimpleGraph"), ("weight", "i32")]); + ( + ser(SpinGlass::from_graph(graph, couplings, fields))?, + variant, + ) + } + + // Factoring + "Factoring" => { + bail!("Factoring requires complex construction — use a JSON file instead"); + } + + _ => bail!( + "Unknown or unsupported problem type for create: {}", + canonical + ), + }; + + let output = ProblemJsonOutput { + problem_type: canonical.clone(), + variant, + data, + }; + + let json = serde_json::to_value(&output)?; + let text = format!("Created {} instance", canonical); + let default_name = format!("pred_{}.json", canonical.to_lowercase()); + out.emit_with_default_name(&default_name, &text, &json) +} + +fn ser(problem: T) -> Result { + Ok(serde_json::to_value(problem)?) +} + +fn variant_map(pairs: &[(&str, &str)]) -> BTreeMap { + pairs + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect() +} + +/// Parse `--edges` into a SimpleGraph, inferring num_vertices from max index. +fn parse_graph(args: &CreateArgs) -> Result<(SimpleGraph, usize)> { + let edges_str = args + .edges + .as_deref() + .ok_or_else(|| anyhow::anyhow!("This problem requires --edges (e.g., 0-1,1-2,2-3)"))?; + + let edges: Vec<(usize, usize)> = edges_str + .split(',') + .map(|pair| { + let parts: Vec<&str> = pair.trim().split('-').collect(); + if parts.len() != 2 { + bail!("Invalid edge '{}': expected format u-v", pair.trim()); + } + let u: usize = parts[0].parse()?; + let v: usize = parts[1].parse()?; + Ok((u, v)) + }) + .collect::>>()?; + + let num_vertices = edges + .iter() + .flat_map(|(u, v)| [*u, *v]) + .max() + .map(|m| m + 1) + .unwrap_or(0); + + Ok((SimpleGraph::new(num_vertices, edges), num_vertices)) +} + +/// Parse `--weights` as vertex weights (i32), defaulting to all 1s. 
+fn parse_vertex_weights(args: &CreateArgs, num_vertices: usize) -> Result> { + match &args.weights { + Some(w) => { + let weights: Vec = w + .split(',') + .map(|s| s.trim().parse::()) + .collect::, _>>()?; + if weights.len() != num_vertices { + bail!( + "Expected {} weights but got {}", + num_vertices, + weights.len() + ); + } + Ok(weights) + } + None => Ok(vec![1i32; num_vertices]), + } +} + +/// Parse `--weights` as edge weights (i32), defaulting to all 1s. +fn parse_edge_weights(args: &CreateArgs, num_edges: usize) -> Result> { + match &args.weights { + Some(w) => { + let weights: Vec = w + .split(',') + .map(|s| s.trim().parse::()) + .collect::, _>>()?; + if weights.len() != num_edges { + bail!( + "Expected {} edge weights but got {}", + num_edges, + weights.len() + ); + } + Ok(weights) + } + None => Ok(vec![1i32; num_edges]), + } +} + +/// Parse `--clauses` as semicolon-separated clauses of comma-separated literals. +/// E.g., "1,2;-1,3;2,-3" +fn parse_clauses(args: &CreateArgs) -> Result> { + let clauses_str = args + .clauses + .as_deref() + .ok_or_else(|| anyhow::anyhow!("SAT problems require --clauses (e.g., \"1,2;-1,3\")"))?; + + clauses_str + .split(';') + .map(|clause| { + let literals: Vec = clause + .trim() + .split(',') + .map(|s| s.trim().parse::()) + .collect::, _>>()?; + Ok(CNFClause::new(literals)) + }) + .collect() +} + +/// Parse `--matrix` as semicolon-separated rows of comma-separated f64 values. 
+/// E.g., "1,0.5;0.5,2" +fn parse_matrix(args: &CreateArgs) -> Result>> { + let matrix_str = args + .matrix + .as_deref() + .ok_or_else(|| anyhow::anyhow!("QUBO requires --matrix (e.g., \"1,0.5;0.5,2\")"))?; + + matrix_str + .split(';') + .map(|row| { + row.trim() + .split(',') + .map(|s| { + s.trim() + .parse::() + .map_err(|e| anyhow::anyhow!("Invalid matrix value: {}", e)) + }) + .collect() + }) + .collect() +} diff --git a/problemreductions-cli/src/commands/evaluate.rs b/problemreductions-cli/src/commands/evaluate.rs new file mode 100644 index 00000000..13a7b34c --- /dev/null +++ b/problemreductions-cli/src/commands/evaluate.rs @@ -0,0 +1,44 @@ +use crate::dispatch::{load_problem, ProblemJson}; +use crate::output::OutputConfig; +use anyhow::Result; +use std::path::Path; + +pub fn evaluate(input: &Path, config_str: &str, out: &OutputConfig) -> Result<()> { + let content = std::fs::read_to_string(input)?; + let problem_json: ProblemJson = serde_json::from_str(&content)?; + + let problem = load_problem( + &problem_json.problem_type, + &problem_json.variant, + problem_json.data, + )?; + + let config: Vec = config_str + .split(',') + .map(|s| { + s.trim() + .parse::() + .map_err(|e| anyhow::anyhow!("Invalid config value '{}': {}", s.trim(), e)) + }) + .collect::>>()?; + + let dims = problem.dims_dyn(); + if config.len() != dims.len() { + anyhow::bail!( + "Config has {} values but problem has {} variables", + config.len(), + dims.len() + ); + } + + let result = problem.evaluate_dyn(&config); + + let text = result.to_string(); + let json = serde_json::json!({ + "problem": problem.problem_name(), + "config": config, + "result": result, + }); + + out.emit_with_default_name("pred_evaluate.json", &text, &json) +} diff --git a/problemreductions-cli/src/commands/graph.rs b/problemreductions-cli/src/commands/graph.rs new file mode 100644 index 00000000..f8e31bdb --- /dev/null +++ b/problemreductions-cli/src/commands/graph.rs @@ -0,0 +1,404 @@ +use 
crate::output::OutputConfig; +use crate::problem_name::{aliases_for, parse_problem_spec, resolve_variant}; +use anyhow::{Context, Result}; +use problemreductions::registry::collect_schemas; +use problemreductions::rules::{Minimize, MinimizeSteps, ReductionGraph}; +use problemreductions::types::ProblemSize; +use std::collections::BTreeMap; +use std::path::Path; + +pub fn list(out: &OutputConfig) -> Result<()> { + let graph = ReductionGraph::new(); + + let mut types = graph.problem_types(); + types.sort(); + + let mut text = format!( + "Registered problems: {} types, {} reductions, {} variant nodes\n\n", + graph.num_types(), + graph.num_reductions(), + graph.num_variant_nodes(), + ); + + for name in &types { + let aliases = aliases_for(name); + if aliases.is_empty() { + text.push_str(&format!(" {name}\n")); + } else { + text.push_str(&format!(" {name} ({})\n", aliases.join(", "))); + } + } + + text.push_str("\nUse `pred show ` to see variants, reductions, and fields.\n"); + + let json = serde_json::json!({ + "num_types": graph.num_types(), + "num_reductions": graph.num_reductions(), + "num_variant_nodes": graph.num_variant_nodes(), + "problems": types.iter().map(|name| { + let aliases = aliases_for(name); + serde_json::json!({ "name": name, "aliases": aliases }) + }).collect::>(), + }); + + out.emit_with_default_name("pred_graph_list.json", &text, &json) +} + +pub fn show(problem: &str, out: &OutputConfig) -> Result<()> { + let spec = parse_problem_spec(problem)?; + let graph = ReductionGraph::new(); + + let variants = graph.variants_for(&spec.name); + if variants.is_empty() { + anyhow::bail!("Unknown problem: {}", spec.name); + } + + let mut text = format!("{}\n", spec.name); + + // Show description from schema + let schemas = collect_schemas(); + let schema = schemas.iter().find(|s| s.name == spec.name); + if let Some(s) = schema { + if !s.description.is_empty() { + text.push_str(&format!(" {}\n", s.description)); + } + } + + // Show variants + 
text.push_str(&format!("\nVariants ({}):\n", variants.len())); + for v in &variants { + text.push_str(&format!(" {}\n", format_variant(v))); + } + + // Show fields from schema (right after variants) + if let Some(s) = schema { + text.push_str(&format!("\nFields ({}):\n", s.fields.len())); + for field in &s.fields { + text.push_str(&format!(" {} ({})", field.name, field.type_name)); + if !field.description.is_empty() { + text.push_str(&format!(" -- {}", field.description)); + } + text.push('\n'); + } + } + + // Show size fields (used with `pred path --cost minimize:`) + let size_fields = graph.size_field_names(&spec.name); + if !size_fields.is_empty() { + text.push_str(&format!("\nSize fields ({}):\n", size_fields.len())); + for f in size_fields { + text.push_str(&format!(" {f}\n")); + } + } + + // Show reductions from/to this problem + let outgoing = graph.outgoing_reductions(&spec.name); + let incoming = graph.incoming_reductions(&spec.name); + + text.push_str(&format!("\nReduces to ({}):\n", outgoing.len())); + for e in &outgoing { + text.push_str(&format!( + " {} {} -> {} {}\n", + e.source_name, + format_variant(&e.source_variant), + e.target_name, + format_variant(&e.target_variant), + )); + } + + text.push_str(&format!("\nReduces from ({}):\n", incoming.len())); + for e in &incoming { + text.push_str(&format!( + " {} {} -> {} {}\n", + e.source_name, + format_variant(&e.source_variant), + e.target_name, + format_variant(&e.target_variant), + )); + } + + let mut json = serde_json::json!({ + "name": spec.name, + "variants": variants, + "size_fields": size_fields, + "reduces_to": outgoing.iter().map(|e| { + serde_json::json!({"source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}}) + }).collect::>(), + "reduces_from": incoming.iter().map(|e| { + serde_json::json!({"source": {"name": e.source_name, "variant": e.source_variant}, "target": {"name": e.target_name, "variant": e.target_variant}}) + 
}).collect::>(), + }); + if let Some(s) = schema { + if let (Some(obj), Ok(schema_val)) = (json.as_object_mut(), serde_json::to_value(s)) { + obj.insert("schema".to_string(), schema_val); + } + } + + let default_name = format!("pred_show_{}.json", spec.name); + out.emit_with_default_name(&default_name, &text, &json) +} + +fn format_variant(v: &BTreeMap) -> String { + if v.is_empty() { + "(default)".to_string() + } else { + let pairs: Vec = v.iter().map(|(k, val)| format!("{k}={val}")).collect(); + format!("{{{}}}", pairs.join(", ")) + } +} + +fn format_path_text( + graph: &ReductionGraph, + reduction_path: &problemreductions::rules::ReductionPath, +) -> String { + let mut text = format!( + "Path ({} steps): {}\n", + reduction_path.len(), + reduction_path + ); + + let overheads = graph.path_overheads(reduction_path); + let steps = &reduction_path.steps; + for i in 0..steps.len().saturating_sub(1) { + let from = &steps[i]; + let to = &steps[i + 1]; + text.push_str(&format!("\n Step {}: {} → {}\n", i + 1, from, to)); + let oh = &overheads[i]; + for (field, poly) in &oh.output_size { + text.push_str(&format!(" {field} = {poly}\n")); + } + } + + text +} + +fn format_path_json( + graph: &ReductionGraph, + reduction_path: &problemreductions::rules::ReductionPath, +) -> serde_json::Value { + let overheads = graph.path_overheads(reduction_path); + let steps_json: Vec = reduction_path + .steps + .windows(2) + .zip(overheads.iter()) + .enumerate() + .map(|(i, (pair, oh))| { + serde_json::json!({ + "from": {"name": pair[0].name, "variant": pair[0].variant}, + "to": {"name": pair[1].name, "variant": pair[1].variant}, + "step": i + 1, + "overhead": oh.output_size.iter().map(|(field, poly)| { + serde_json::json!({"field": field, "formula": poly.to_string()}) + }).collect::>(), + }) + }) + .collect(); + + serde_json::json!({ + "steps": reduction_path.len(), + "path": steps_json, + }) +} + +pub fn path(source: &str, target: &str, cost: &str, all: bool, out: &OutputConfig) -> 
Result<()> { + let src_spec = parse_problem_spec(source)?; + let dst_spec = parse_problem_spec(target)?; + let graph = ReductionGraph::new(); + + let src_variants = graph.variants_for(&src_spec.name); + let dst_variants = graph.variants_for(&dst_spec.name); + + if src_variants.is_empty() { + anyhow::bail!( + "Unknown source problem: {}\n\n\ + Usage: pred path \n\ + Example: pred path MIS QUBO\n\n\ + Run `pred list` to see all available problems.", + src_spec.name + ); + } + if dst_variants.is_empty() { + anyhow::bail!( + "Unknown target problem: {}\n\n\ + Usage: pred path \n\ + Example: pred path MIS QUBO\n\n\ + Run `pred list` to see all available problems.", + dst_spec.name + ); + } + + if all { + // --all uses only the specified variant or the first (default) one + let sv = if src_spec.variant_values.is_empty() { + src_variants[0].clone() + } else { + resolve_variant(&src_spec, &src_variants)? + }; + let dv = if dst_spec.variant_values.is_empty() { + dst_variants[0].clone() + } else { + resolve_variant(&dst_spec, &dst_variants)? + }; + return path_all(&graph, &src_spec.name, &sv, &dst_spec.name, &dv, out); + } + + let src_resolved = if src_spec.variant_values.is_empty() { + src_variants.clone() + } else { + vec![resolve_variant(&src_spec, &src_variants)?] + }; + let dst_resolved = if dst_spec.variant_values.is_empty() { + dst_variants.clone() + } else { + vec![resolve_variant(&dst_spec, &dst_variants)?] + }; + + let input_size = ProblemSize::new(vec![]); + + // Parse cost function once (validate before the search loop) + enum CostChoice { + Steps, + Field(&'static str), + } + let cost_choice = if cost == "minimize-steps" { + CostChoice::Steps + } else if let Some(field) = cost.strip_prefix("minimize:") { + // Leak the field name to get &'static str (fine for a CLI that exits immediately) + CostChoice::Field(Box::leak(field.to_string().into_boxed_str())) + } else { + anyhow::bail!( + "Unknown cost function: {}. 
Use 'minimize-steps' or 'minimize:'", + cost + ); + }; + + let mut best_path: Option = None; + + for sv in &src_resolved { + for dv in &dst_resolved { + let found = match cost_choice { + CostChoice::Steps => graph.find_cheapest_path( + &src_spec.name, sv, &dst_spec.name, dv, &input_size, &MinimizeSteps, + ), + CostChoice::Field(f) => graph.find_cheapest_path( + &src_spec.name, sv, &dst_spec.name, dv, &input_size, &Minimize(f), + ), + }; + + if let Some(p) = found { + let is_better = best_path.as_ref().is_none_or(|bp| p.len() < bp.len()); + if is_better { + best_path = Some(p); + } + } + } + } + + match best_path { + Some(ref reduction_path) => { + let text = format_path_text(&graph, reduction_path); + if let Some(ref path) = out.output { + let json = format_path_json(&graph, reduction_path); + let content = + serde_json::to_string_pretty(&json).context("Failed to serialize JSON")?; + std::fs::write(path, &content) + .with_context(|| format!("Failed to write {}", path.display()))?; + eprintln!("Wrote {}", path.display()); + } else { + println!("{text}"); + } + Ok(()) + } + None => { + anyhow::bail!( + "No reduction path from {} to {}\n\n\ + Usage: pred path \n\ + Example: pred path MIS QUBO\n\n\ + Run `pred show {}` and `pred show {}` to check available reductions.", + src_spec.name, + dst_spec.name, + src_spec.name, + dst_spec.name, + ); + } + } +} + +fn path_all( + graph: &ReductionGraph, + src_name: &str, + src_variant: &BTreeMap, + dst_name: &str, + dst_variant: &BTreeMap, + out: &OutputConfig, +) -> Result<()> { + let mut all_paths = graph.find_all_paths(src_name, src_variant, dst_name, dst_variant); + + if all_paths.is_empty() { + anyhow::bail!( + "No reduction path from {} to {}\n\n\ + Usage: pred path --all\n\ + Example: pred path MIS QUBO --all\n\n\ + Run `pred show {}` and `pred show {}` to check available reductions.", + src_name, + dst_name, + src_name, + dst_name, + ); + } + + // Sort by path length (shortest first) + all_paths.sort_by_key(|p| p.len()); 
+ + let mut text = format!("Found {} paths from {} to {}:\n", all_paths.len(), src_name, dst_name); + for (idx, p) in all_paths.iter().enumerate() { + text.push_str(&format!("\n--- Path {} ---\n", idx + 1)); + text.push_str(&format_path_text(graph, p)); + } + + if let Some(ref dir) = out.output { + // -o specifies the output folder; save each path as a separate JSON file + std::fs::create_dir_all(dir) + .with_context(|| format!("Failed to create directory {}", dir.display()))?; + + for (idx, p) in all_paths.iter().enumerate() { + let json = format_path_json(graph, p); + let file = dir.join(format!("path_{}.json", idx + 1)); + let content = + serde_json::to_string_pretty(&json).context("Failed to serialize JSON")?; + std::fs::write(&file, &content) + .with_context(|| format!("Failed to write {}", file.display()))?; + } + eprintln!( + "Wrote {} path files to {}", + all_paths.len(), + dir.display() + ); + } else { + println!("{text}"); + } + + Ok(()) +} + +pub fn export(output: &Path) -> Result<()> { + let graph = ReductionGraph::new(); + + if let Some(parent) = output.parent() { + std::fs::create_dir_all(parent)?; + } + + graph + .to_json_file(output) + .map_err(|e| anyhow::anyhow!("Failed to export: {}", e))?; + + eprintln!( + "Exported reduction graph ({} types, {} reductions, {} variant nodes) to {}", + graph.num_types(), + graph.num_reductions(), + graph.num_variant_nodes(), + output.display() + ); + + Ok(()) +} diff --git a/problemreductions-cli/src/commands/mod.rs b/problemreductions-cli/src/commands/mod.rs new file mode 100644 index 00000000..2bc2e8f3 --- /dev/null +++ b/problemreductions-cli/src/commands/mod.rs @@ -0,0 +1,5 @@ +pub mod create; +pub mod evaluate; +pub mod graph; +pub mod reduce; +pub mod solve; diff --git a/problemreductions-cli/src/commands/reduce.rs b/problemreductions-cli/src/commands/reduce.rs new file mode 100644 index 00000000..f23640ed --- /dev/null +++ b/problemreductions-cli/src/commands/reduce.rs @@ -0,0 +1,205 @@ +use 
crate::dispatch::{ + load_problem, serialize_any_problem, PathStep, ProblemJson, ProblemJsonOutput, ReductionBundle, +}; +use crate::output::OutputConfig; +use crate::problem_name::parse_problem_spec; +use anyhow::{Context, Result}; +use problemreductions::rules::{MinimizeSteps, ReductionGraph, ReductionPath, ReductionStep}; +use problemreductions::types::ProblemSize; +use std::collections::BTreeMap; +use std::path::Path; + +/// Parse a path JSON file (produced by `pred path ... -o`) into a ReductionPath. +fn load_path_file(path_file: &Path) -> Result { + let content = + std::fs::read_to_string(path_file).context("Failed to read path file")?; + let json: serde_json::Value = + serde_json::from_str(&content).context("Failed to parse path file")?; + + let path_array = json["path"] + .as_array() + .ok_or_else(|| anyhow::anyhow!("Path file missing 'path' array"))?; + + let mut steps: Vec = Vec::new(); + for (i, entry) in path_array.iter().enumerate() { + if i == 0 { + let from = &entry["from"]; + steps.push(parse_path_node(from)?); + } + let to = &entry["to"]; + steps.push(parse_path_node(to)?); + } + + if steps.len() < 2 { + anyhow::bail!("Path file must contain at least one reduction step"); + } + + Ok(ReductionPath { steps }) +} + +fn parse_path_node(node: &serde_json::Value) -> Result { + let name = node["name"] + .as_str() + .ok_or_else(|| anyhow::anyhow!("Path node missing 'name'"))? + .to_string(); + let variant: BTreeMap = node + .get("variant") + .and_then(|v| serde_json::from_value(v.clone()).ok()) + .unwrap_or_default(); + Ok(ReductionStep { name, variant }) +} + +pub fn reduce( + input: &Path, + target: &str, + via: Option<&Path>, + out: &OutputConfig, +) -> Result<()> { + // 1. 
Load source problem + let content = std::fs::read_to_string(input)?; + let problem_json: ProblemJson = serde_json::from_str(&content)?; + + let source = load_problem( + &problem_json.problem_type, + &problem_json.variant, + problem_json.data.clone(), + )?; + + let source_name = source.problem_name(); + let source_variant = source.variant_map(); + + // 2. Parse target spec + let dst_spec = parse_problem_spec(target)?; + let graph = ReductionGraph::new(); + + let dst_variants = graph.variants_for(&dst_spec.name); + if dst_variants.is_empty() { + anyhow::bail!("Unknown target problem: {}", dst_spec.name); + } + + // 3. Get reduction path: from --via file or auto-discover + let reduction_path = if let Some(path_file) = via { + let path = load_path_file(path_file)?; + // Validate that the path starts with the source and ends with the target + let first = path.steps.first().unwrap(); + let last = path.steps.last().unwrap(); + if first.name != source_name || first.variant != source_variant { + anyhow::bail!( + "Path file starts with {} {} but source problem is {} {}", + first.name, + format_variant(&first.variant), + source_name, + format_variant(&source_variant), + ); + } + if last.name != dst_spec.name { + anyhow::bail!( + "Path file ends with {} but target is {}", + last.name, + dst_spec.name, + ); + } + path + } else { + // Auto-discover cheapest path + let input_size = ProblemSize::new(vec![]); + let mut best_path = None; + + for dv in &dst_variants { + if let Some(p) = graph.find_cheapest_path( + source_name, + &source_variant, + &dst_spec.name, + dv, + &input_size, + &MinimizeSteps, + ) { + let is_better = best_path + .as_ref() + .is_none_or(|bp: &ReductionPath| p.len() < bp.len()); + if is_better { + best_path = Some(p); + } + } + } + + best_path.ok_or_else(|| { + anyhow::anyhow!( + "No reduction path from {} to {}\n\n\ + Hint: generate a path file first, then pass it with --via:\n\ + pred path {} {} -o path.json\n\ + pred reduce {} --to {} --via path.json -o 
reduced.json", + source_name, + dst_spec.name, + source_name, + dst_spec.name, + input.display(), + dst_spec.name, + ) + })? + }; + + // 4. Execute reduction chain via reduce_along_path + let chain = graph + .reduce_along_path(&reduction_path, source.as_any()) + .ok_or_else(|| anyhow::anyhow!("Failed to execute reduction chain"))?; + + // 5. Serialize target + let target_step = reduction_path.steps.last().unwrap(); + let target_data = serialize_any_problem( + &target_step.name, + &target_step.variant, + chain.target_problem_any(), + )?; + + // 6. Build full reduction bundle + let bundle = ReductionBundle { + source: ProblemJsonOutput { + problem_type: source_name.to_string(), + variant: source_variant, + data: problem_json.data, + }, + target: ProblemJsonOutput { + problem_type: target_step.name.clone(), + variant: target_step.variant.clone(), + data: target_data, + }, + path: reduction_path + .steps + .iter() + .map(|s| PathStep { + name: s.name.clone(), + variant: s.variant.clone(), + }) + .collect(), + }; + + let json = serde_json::to_value(&bundle)?; + + if let Some(ref path) = out.output { + let content = + serde_json::to_string_pretty(&json).context("Failed to serialize JSON")?; + std::fs::write(path, &content) + .with_context(|| format!("Failed to write {}", path.display()))?; + eprintln!( + "Reduced {} to {} ({} steps)\nWrote {}", + source_name, + target_step.name, + reduction_path.len(), + path.display(), + ); + } else { + println!("{}", serde_json::to_string_pretty(&json)?); + } + + Ok(()) +} + +fn format_variant(v: &BTreeMap) -> String { + if v.is_empty() { + "(default)".to_string() + } else { + let pairs: Vec = v.iter().map(|(k, val)| format!("{k}={val}")).collect(); + format!("{{{}}}", pairs.join(", ")) + } +} diff --git a/problemreductions-cli/src/commands/solve.rs b/problemreductions-cli/src/commands/solve.rs new file mode 100644 index 00000000..607b5333 --- /dev/null +++ b/problemreductions-cli/src/commands/solve.rs @@ -0,0 +1,181 @@ +use 
crate::dispatch::{load_problem, ProblemJson, ReductionBundle}; +use crate::output::OutputConfig; +use anyhow::{Context, Result}; +use problemreductions::rules::ReductionGraph; +use std::path::Path; + +/// Input can be either a problem JSON or a reduction bundle JSON. +enum SolveInput { + /// A plain problem file (from `pred create`). + Problem(ProblemJson), + /// A reduction bundle (from `pred reduce`) with source, target, and path. + Bundle(ReductionBundle), +} + +fn parse_input(path: &Path) -> Result { + let content = std::fs::read_to_string(path) + .with_context(|| format!("Failed to read {}", path.display()))?; + let json: serde_json::Value = + serde_json::from_str(&content).context("Failed to parse JSON")?; + + // Reduction bundles have "source", "target", and "path" fields + if json.get("source").is_some() && json.get("target").is_some() && json.get("path").is_some() { + let bundle: ReductionBundle = + serde_json::from_value(json).context("Failed to parse reduction bundle")?; + Ok(SolveInput::Bundle(bundle)) + } else { + let problem: ProblemJson = + serde_json::from_value(json).context("Failed to parse problem JSON")?; + Ok(SolveInput::Problem(problem)) + } +} + +pub fn solve(input: &Path, solver_name: &str, out: &OutputConfig) -> Result<()> { + if solver_name != "brute-force" && solver_name != "ilp" { + anyhow::bail!( + "Unknown solver: {}. Available solvers: brute-force, ilp", + solver_name + ); + } + + let parsed = parse_input(input)?; + + match parsed { + SolveInput::Problem(problem_json) => { + solve_problem(&problem_json.problem_type, &problem_json.variant, problem_json.data, solver_name, out) + } + SolveInput::Bundle(bundle) => { + solve_bundle(bundle, solver_name, out) + } + } +} + +/// Solve a plain problem file directly. 
+fn solve_problem( + problem_type: &str, + variant: &std::collections::BTreeMap, + data: serde_json::Value, + solver_name: &str, + out: &OutputConfig, +) -> Result<()> { + let problem = load_problem(problem_type, variant, data)?; + let name = problem.problem_name(); + + match solver_name { + "brute-force" => { + let result = problem.solve_brute_force()?; + let text = format!( + "Problem: {}\nSolver: brute-force\nSolution: {:?}\nEvaluation: {}", + name, result.config, result.evaluation, + ); + let json = serde_json::json!({ + "problem": name, + "solver": "brute-force", + "solution": result.config, + "evaluation": result.evaluation, + }); + out.emit_with_default_name("", &text, &json) + } + "ilp" => { + let result = problem.solve_with_ilp()?; + let reduced = name != "ILP"; + let text = if reduced { + format!( + "Problem: {} (reduced to ILP)\nSolver: ilp\nSolution: {:?}\nEvaluation: {}", + name, result.config, result.evaluation, + ) + } else { + format!( + "Problem: ILP\nSolver: ilp\nSolution: {:?}\nEvaluation: {}", + result.config, result.evaluation, + ) + }; + let mut json = serde_json::json!({ + "problem": name, + "solver": "ilp", + "solution": result.config, + "evaluation": result.evaluation, + }); + if reduced { + json["reduced_to"] = serde_json::json!("ILP"); + } + out.emit_with_default_name("", &text, &json) + } + _ => unreachable!(), + } +} + +/// Solve a reduction bundle: solve the target problem, then map the solution back. +fn solve_bundle(bundle: ReductionBundle, solver_name: &str, out: &OutputConfig) -> Result<()> { + // 1. Load the target problem from the bundle + let target = load_problem( + &bundle.target.problem_type, + &bundle.target.variant, + bundle.target.data.clone(), + )?; + let target_name = target.problem_name(); + + // 2. Solve the target problem + let target_result = match solver_name { + "brute-force" => target.solve_brute_force()?, + "ilp" => target.solve_with_ilp()?, + _ => unreachable!(), + }; + + // 3. 
Load source problem and re-execute the reduction chain to get extract_solution + let source = load_problem( + &bundle.source.problem_type, + &bundle.source.variant, + bundle.source.data.clone(), + )?; + let source_name = source.problem_name(); + + let graph = ReductionGraph::new(); + + // Reconstruct the ReductionPath from the bundle's path steps + let reduction_path = problemreductions::rules::ReductionPath { + steps: bundle + .path + .iter() + .map(|s| problemreductions::rules::ReductionStep { + name: s.name.clone(), + variant: s.variant.clone(), + }) + .collect(), + }; + + let chain = graph + .reduce_along_path(&reduction_path, source.as_any()) + .ok_or_else(|| anyhow::anyhow!("Failed to re-execute reduction chain for solution extraction"))?; + + // 4. Extract solution back to source problem space + let source_config = chain.extract_solution(&target_result.config); + let source_eval = source.evaluate_dyn(&source_config); + + let text = format!( + "Source: {}\nTarget: {} (solved with {})\nTarget solution: {:?}\nTarget evaluation: {}\nSource solution: {:?}\nSource evaluation: {}", + source_name, + target_name, + solver_name, + target_result.config, + target_result.evaluation, + source_config, + source_eval, + ); + + let json = serde_json::json!({ + "source": { + "problem": source_name, + "solution": source_config, + "evaluation": source_eval, + }, + "target": { + "problem": target_name, + "solver": solver_name, + "solution": target_result.config, + "evaluation": target_result.evaluation, + }, + }); + + out.emit_with_default_name("", &text, &json) +} diff --git a/problemreductions-cli/src/dispatch.rs b/problemreductions-cli/src/dispatch.rs new file mode 100644 index 00000000..a5bb57d4 --- /dev/null +++ b/problemreductions-cli/src/dispatch.rs @@ -0,0 +1,326 @@ +use anyhow::{bail, Result}; +use problemreductions::prelude::*; +use problemreductions::models::optimization::ILP; +use problemreductions::rules::{MinimizeSteps, ReductionGraph}; +use 
problemreductions::solvers::{BruteForce, ILPSolver, Solver}; +use problemreductions::topology::{ + KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph, +}; +use problemreductions::types::ProblemSize; +use problemreductions::variant::{K2, K3, KN}; +use serde::Serialize; +use serde_json::Value; +use std::any::Any; +use std::collections::BTreeMap; +use std::fmt; +use std::ops::Deref; + +use crate::problem_name::resolve_alias; + +/// Type-erased problem for CLI dispatch. +#[allow(dead_code)] +pub trait DynProblem: Any { + fn evaluate_dyn(&self, config: &[usize]) -> String; + fn serialize_json(&self) -> Value; + fn as_any(&self) -> &dyn Any; + fn dims_dyn(&self) -> Vec; + fn problem_name(&self) -> &'static str; + fn variant_map(&self) -> BTreeMap; +} + +impl DynProblem for T +where + T: Problem + Serialize + 'static, + T::Metric: fmt::Debug, +{ + fn evaluate_dyn(&self, config: &[usize]) -> String { + format!("{:?}", self.evaluate(config)) + } + fn serialize_json(&self) -> Value { + serde_json::to_value(self).expect("serialize failed") + } + fn as_any(&self) -> &dyn Any { + self + } + fn dims_dyn(&self) -> Vec { + self.dims() + } + fn problem_name(&self) -> &'static str { + T::NAME + } + fn variant_map(&self) -> BTreeMap { + T::variant() + .into_iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect() + } +} + +fn deser_opt(data: Value) -> Result +where + T: OptimizationProblem + Serialize + serde::de::DeserializeOwned + 'static, + T::Metric: fmt::Debug, +{ + let problem: T = serde_json::from_value(data)?; + Ok(LoadedProblem { + inner: Box::new(problem), + brute_force_fn: bf_opt::, + }) +} + +fn deser_sat(data: Value) -> Result +where + T: Problem + Serialize + serde::de::DeserializeOwned + 'static, +{ + let problem: T = serde_json::from_value(data)?; + Ok(LoadedProblem { + inner: Box::new(problem), + brute_force_fn: bf_sat::, + }) +} + +fn bf_opt(any: &dyn Any) -> Option +where + T: OptimizationProblem + 'static, + T::Metric: fmt::Debug, +{ + let p 
= any.downcast_ref::()?; + let config = BruteForce::new().find_best(p)?; + let evaluation = format!("{:?}", p.evaluate(&config)); + Some(SolveResult { config, evaluation }) +} + +fn bf_sat(any: &dyn Any) -> Option +where + T: Problem + 'static, +{ + let p = any.downcast_ref::()?; + let config = BruteForce::new().find_satisfying(p)?; + let evaluation = format!("{:?}", p.evaluate(&config)); + Some(SolveResult { config, evaluation }) +} + +/// Loaded problem with type-erased solve capability. +pub struct LoadedProblem { + inner: Box, + brute_force_fn: fn(&dyn Any) -> Option, +} + +impl Deref for LoadedProblem { + type Target = dyn DynProblem; + fn deref(&self) -> &(dyn DynProblem + 'static) { + &*self.inner + } +} + +impl LoadedProblem { + pub fn solve_brute_force(&self) -> Result { + (self.brute_force_fn)(self.inner.as_any()) + .ok_or_else(|| anyhow::anyhow!("No solution found")) + } + + /// Solve using the ILP solver. If the problem is not ILP, auto-reduce to ILP first. + pub fn solve_with_ilp(&self) -> Result { + let name = self.problem_name(); + if name == "ILP" { + return solve_ilp(self.as_any()); + } + + // Auto-reduce to ILP, solve, and map solution back + let source_variant = self.variant_map(); + let graph = ReductionGraph::new(); + let ilp_variants = graph.variants_for("ILP"); + let input_size = ProblemSize::new(vec![]); + + let mut best_path = None; + for dv in &ilp_variants { + if let Some(p) = graph.find_cheapest_path( + name, &source_variant, "ILP", dv, &input_size, &MinimizeSteps, + ) { + let is_better = best_path + .as_ref() + .is_none_or(|bp: &problemreductions::rules::ReductionPath| p.len() < bp.len()); + if is_better { + best_path = Some(p); + } + } + } + + let reduction_path = best_path + .ok_or_else(|| anyhow::anyhow!("No reduction path from {} to ILP", name))?; + + let chain = graph + .reduce_along_path(&reduction_path, self.as_any()) + .ok_or_else(|| anyhow::anyhow!("Failed to execute reduction chain to ILP"))?; + + let ilp_result = 
solve_ilp(chain.target_problem_any())?; + let config = chain.extract_solution(&ilp_result.config); + let evaluation = self.evaluate_dyn(&config); + Ok(SolveResult { config, evaluation }) + } +} + +fn graph_variant(variant: &BTreeMap) -> &str { + variant + .get("graph") + .map(|s| s.as_str()) + .unwrap_or("SimpleGraph") +} + +/// Load a problem from JSON type/variant/data. +pub fn load_problem( + name: &str, + variant: &BTreeMap, + data: Value, +) -> Result { + let canonical = resolve_alias(name); + match canonical.as_str() { + "MaximumIndependentSet" => match graph_variant(variant) { + "KingsSubgraph" => deser_opt::>(data), + "TriangularSubgraph" => deser_opt::>(data), + "UnitDiskGraph" => deser_opt::>(data), + _ => deser_opt::>(data), + }, + "MinimumVertexCover" => deser_opt::>(data), + "MaximumClique" => deser_opt::>(data), + "MaximumMatching" => deser_opt::>(data), + "MinimumDominatingSet" => deser_opt::>(data), + "MaxCut" => deser_opt::>(data), + "TravelingSalesman" => deser_opt::>(data), + "KColoring" => match variant.get("k").map(|s| s.as_str()) { + Some("K3") => deser_sat::>(data), + _ => deser_sat::>(data), + }, + "MaximumSetPacking" => deser_opt::>(data), + "MinimumSetCovering" => deser_opt::>(data), + "QUBO" => deser_opt::>(data), + "SpinGlass" => match variant.get("weight").map(|s| s.as_str()) { + Some("f64") => deser_opt::>(data), + _ => deser_opt::>(data), + }, + "Satisfiability" => deser_sat::(data), + "KSatisfiability" => match variant.get("k").map(|s| s.as_str()) { + Some("K2") => deser_sat::>(data), + Some("K3") => deser_sat::>(data), + _ => deser_sat::>(data), + }, + "CircuitSAT" => deser_sat::(data), + "Factoring" => deser_opt::(data), + "ILP" => deser_opt::(data), + "BicliqueCover" => deser_opt::(data), + "BMF" => deser_opt::(data), + "PaintShop" => deser_opt::(data), + _ => bail!("Unknown problem type: {canonical}"), + } +} + +/// Serialize a `&dyn Any` target problem given its name and variant. 
+pub fn serialize_any_problem( + name: &str, + variant: &BTreeMap, + any: &dyn Any, +) -> Result { + let canonical = resolve_alias(name); + match canonical.as_str() { + "MaximumIndependentSet" => match graph_variant(variant) { + "KingsSubgraph" => try_ser::>(any), + "TriangularSubgraph" => try_ser::>(any), + "UnitDiskGraph" => try_ser::>(any), + _ => try_ser::>(any), + }, + "MinimumVertexCover" => try_ser::>(any), + "MaximumClique" => try_ser::>(any), + "MaximumMatching" => try_ser::>(any), + "MinimumDominatingSet" => try_ser::>(any), + "MaxCut" => try_ser::>(any), + "TravelingSalesman" => try_ser::>(any), + "KColoring" => match variant.get("k").map(|s| s.as_str()) { + Some("K3") => try_ser::>(any), + _ => try_ser::>(any), + }, + "MaximumSetPacking" => match variant.get("weight").map(|s| s.as_str()) { + Some("f64") => try_ser::>(any), + _ => try_ser::>(any), + }, + "MinimumSetCovering" => try_ser::>(any), + "QUBO" => try_ser::>(any), + "SpinGlass" => match variant.get("weight").map(|s| s.as_str()) { + Some("f64") => try_ser::>(any), + _ => try_ser::>(any), + }, + "Satisfiability" => try_ser::(any), + "KSatisfiability" => match variant.get("k").map(|s| s.as_str()) { + Some("K2") => try_ser::>(any), + Some("K3") => try_ser::>(any), + _ => try_ser::>(any), + }, + "CircuitSAT" => try_ser::(any), + "Factoring" => try_ser::(any), + "ILP" => try_ser::(any), + "BicliqueCover" => try_ser::(any), + "BMF" => try_ser::(any), + "PaintShop" => try_ser::(any), + _ => bail!("Unknown problem type: {canonical}"), + } +} + +fn try_ser(any: &dyn Any) -> Result { + let problem = any + .downcast_ref::() + .ok_or_else(|| anyhow::anyhow!("Type mismatch during serialization"))?; + Ok(serde_json::to_value(problem)?) +} + +/// JSON wrapper format for problem files. +#[derive(serde::Deserialize)] +pub struct ProblemJson { + #[serde(rename = "type")] + pub problem_type: String, + #[serde(default)] + pub variant: BTreeMap, + pub data: Value, +} + +/// JSON wrapper format for reduction bundles. 
+#[derive(serde::Serialize, serde::Deserialize)] +pub struct ReductionBundle { + pub source: ProblemJsonOutput, + pub target: ProblemJsonOutput, + pub path: Vec, +} + +#[derive(serde::Serialize, serde::Deserialize)] +pub struct ProblemJsonOutput { + #[serde(rename = "type")] + pub problem_type: String, + pub variant: BTreeMap, + pub data: Value, +} + +#[derive(serde::Serialize, serde::Deserialize)] +pub struct PathStep { + pub name: String, + pub variant: BTreeMap, +} + +/// Result of solving a problem. +pub struct SolveResult { + /// The solution configuration. + pub config: Vec, + /// Evaluation of the solution. + pub evaluation: String, +} + +/// Solve an ILP problem directly. The input must be an `ILP` instance. +fn solve_ilp(any: &dyn Any) -> Result { + let problem = any + .downcast_ref::() + .ok_or_else(|| anyhow::anyhow!("Internal error: expected ILP problem instance"))?; + let solver = ILPSolver::new(); + let config = solver + .solve(problem) + .ok_or_else(|| anyhow::anyhow!("ILP solver found no feasible solution"))?; + let evaluation = format!("{:?}", problem.evaluate(&config)); + Ok(SolveResult { config, evaluation }) +} + diff --git a/problemreductions-cli/src/main.rs b/problemreductions-cli/src/main.rs new file mode 100644 index 00000000..aac972d1 --- /dev/null +++ b/problemreductions-cli/src/main.rs @@ -0,0 +1,50 @@ +mod cli; +mod commands; +mod dispatch; +mod output; +mod problem_name; + +use cli::{Cli, Commands}; +use clap::Parser; +use output::OutputConfig; + +fn main() -> anyhow::Result<()> { + let cli = match Cli::try_parse() { + Ok(cli) => cli, + Err(e) => { + // Let --help and --version print normally + if e.kind() == clap::error::ErrorKind::DisplayHelp + || e.kind() == clap::error::ErrorKind::DisplayVersion + { + e.exit(); + } + let msg = e.to_string(); + eprint!("{e}"); + // Show the subcommand's after_help (defined once in cli.rs) + cli::print_subcommand_help_hint(&msg); + std::process::exit(e.exit_code()); + } + }; + + let out = OutputConfig 
{ + output: cli.output, + }; + + match cli.command { + Commands::List => commands::graph::list(&out), + Commands::Show { problem } => commands::graph::show(&problem, &out), + Commands::Path { + source, + target, + cost, + all, + } => commands::graph::path(&source, &target, &cost, all, &out), + Commands::ExportGraph { output } => commands::graph::export(&output), + Commands::Create(args) => commands::create::create(&args, &out), + Commands::Solve(args) => commands::solve::solve(&args.input, &args.solver, &out), + Commands::Reduce(args) => { + commands::reduce::reduce(&args.input, &args.to, args.via.as_deref(), &out) + } + Commands::Evaluate(args) => commands::evaluate::evaluate(&args.input, &args.config, &out), + } +} diff --git a/problemreductions-cli/src/output.rs b/problemreductions-cli/src/output.rs new file mode 100644 index 00000000..17a7297a --- /dev/null +++ b/problemreductions-cli/src/output.rs @@ -0,0 +1,30 @@ +use anyhow::Context; +use std::path::PathBuf; + +/// Output configuration derived from CLI flags. +#[derive(Debug, Clone)] +pub struct OutputConfig { + /// Output file path. When set, output is saved as JSON. + pub output: Option, +} + +impl OutputConfig { + /// Emit output: if `-o` is set, save as JSON; otherwise print human text. 
+    pub fn emit_with_default_name(
+        &self,
+        _default_name: &str,
+        human_text: &str,
+        json_value: &serde_json::Value,
+    ) -> anyhow::Result<()> {
+        match &self.output {
+            // `-o PATH` was given: persist the pretty-printed JSON form and note
+            // the location on stderr, keeping stdout clean for piped consumers.
+            Some(path) => {
+                let pretty = serde_json::to_string_pretty(json_value)
+                    .context("Failed to serialize JSON")?;
+                std::fs::write(path, &pretty)
+                    .with_context(|| format!("Failed to write {}", path.display()))?;
+                eprintln!("Wrote {}", path.display());
+            }
+            // No output path: emit the human-readable rendering to stdout.
+            None => println!("{human_text}"),
+        }
+        Ok(())
+    }
+}
diff --git a/problemreductions-cli/src/problem_name.rs b/problemreductions-cli/src/problem_name.rs
new file mode 100644
index 00000000..99ee3d8a
--- /dev/null
+++ b/problemreductions-cli/src/problem_name.rs
+use std::collections::BTreeMap;
+
+/// A parsed problem specification: name + optional variant values.
+#[derive(Debug, Clone)]
+pub struct ProblemSpec {
+    /// Resolved canonical problem name.
+    pub name: String,
+    /// Positional variant values (e.g., ["UnitDiskGraph", "i32"]).
+    pub variant_values: Vec,
+}
+
+/// Alias entries: (alias, canonical_name). Only includes short aliases,
+/// not the lowercase identity mappings.
+pub const ALIASES: &[(&str, &str)] = &[
+    ("MIS", "MaximumIndependentSet"),
+    ("MVC", "MinimumVertexCover"),
+    ("SAT", "Satisfiability"),
+    ("3SAT", "KSatisfiability"),
+    ("KSAT", "KSatisfiability"),
+    ("TSP", "TravelingSalesman"),
+];
+
+/// Resolve a short alias to the canonical problem name.
+pub fn resolve_alias(input: &str) -> String { + match input.to_lowercase().as_str() { + "mis" => "MaximumIndependentSet".to_string(), + "mvc" | "minimumvertexcover" => "MinimumVertexCover".to_string(), + "sat" | "satisfiability" => "Satisfiability".to_string(), + "3sat" => "KSatisfiability".to_string(), + "ksat" | "ksatisfiability" => "KSatisfiability".to_string(), + "qubo" => "QUBO".to_string(), + "maxcut" => "MaxCut".to_string(), + "spinglass" => "SpinGlass".to_string(), + "ilp" => "ILP".to_string(), + "circuitsat" => "CircuitSAT".to_string(), + "factoring" => "Factoring".to_string(), + "maximumindependentset" => "MaximumIndependentSet".to_string(), + "maximumclique" => "MaximumClique".to_string(), + "maximummatching" => "MaximumMatching".to_string(), + "minimumdominatingset" => "MinimumDominatingSet".to_string(), + "minimumsetcovering" => "MinimumSetCovering".to_string(), + "maximumsetpacking" => "MaximumSetPacking".to_string(), + "kcoloring" => "KColoring".to_string(), + "maximalis" => "MaximalIS".to_string(), + "travelingsalesman" | "tsp" => "TravelingSalesman".to_string(), + "paintshop" => "PaintShop".to_string(), + "bmf" => "BMF".to_string(), + "bicliquecover" => "BicliqueCover".to_string(), + _ => input.to_string(), // pass-through for exact names + } +} + +/// Return the short aliases for a canonical problem name, if any. +pub fn aliases_for(canonical: &str) -> Vec<&'static str> { + ALIASES + .iter() + .filter(|(_, name)| *name == canonical) + .map(|(alias, _)| *alias) + .collect() +} + +/// Parse a problem spec string like "MIS/UnitDiskGraph/i32" into name + variant values. 
+pub fn parse_problem_spec(input: &str) -> anyhow::Result {
+    // Split "Name/Variant1/Variant2" into the raw name and positional variant values.
+    // `split` always yields at least one element, so parts[0] cannot panic.
+    let parts: Vec<&str> = input.split('/').collect();
+    let raw_name = parts[0];
+    let mut variant_values: Vec = parts[1..].iter().map(|s| s.to_string()).collect();
+
+    let name = resolve_alias(raw_name);
+
+    // Special case: "3SAT" implies K3 variant
+    // (only when no explicit variant was given, so "3SAT/K3" is not duplicated).
+    if raw_name.to_lowercase() == "3sat" && variant_values.is_empty() {
+        variant_values.push("K3".to_string());
+    }
+
+    // NOTE(review): no failure path exists today; the Result return is kept for
+    // forward compatibility with stricter validation.
+    Ok(ProblemSpec {
+        name,
+        variant_values,
+    })
+}
+
+/// Build a variant BTreeMap by matching positional values against a problem's
+/// known variant keys from the reduction graph.
+pub fn resolve_variant(
+    spec: &ProblemSpec,
+    known_variants: &[BTreeMap],
+) -> anyhow::Result> {
+    if spec.variant_values.is_empty() {
+        // Return the first (default) variant, or empty
+        return Ok(known_variants.first().cloned().unwrap_or_default());
+    }
+
+    // Get the variant keys from the first known variant.
+    // BTreeMap keys iterate in sorted (alphabetical) order, so positional values
+    // map to keys alphabetically (e.g. "graph" before "weight") — TODO confirm
+    // this ordering is the documented CLI contract.
+    let keys: Vec = known_variants
+        .first()
+        .map(|v| v.keys().cloned().collect())
+        .unwrap_or_default();
+
+    if spec.variant_values.len() > keys.len() {
+        anyhow::bail!(
+            "Too many variant values for {}: expected at most {} but got {}",
+            spec.name,
+            keys.len(),
+            spec.variant_values.len()
+        );
+    }
+
+    // Build the variant map: fill specified positions, use defaults for the rest
+    let mut result = known_variants.first().cloned().unwrap_or_default();
+    for (i, value) in spec.variant_values.iter().enumerate() {
+        if let Some(key) = keys.get(i) {
+            result.insert(key.clone(), value.clone());
+        }
+    }
+
+    // Verify this variant exists
+    // (guards against positional values that name a nonexistent combination).
+    if !known_variants.contains(&result) {
+        anyhow::bail!(
+            "Unknown variant for {}: {:?}. 
Known variants: {:?}", + spec.name, + result, + known_variants + ); + } + + Ok(result) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_alias_resolution() { + assert_eq!(resolve_alias("MIS"), "MaximumIndependentSet"); + assert_eq!(resolve_alias("mis"), "MaximumIndependentSet"); + assert_eq!(resolve_alias("MVC"), "MinimumVertexCover"); + assert_eq!(resolve_alias("SAT"), "Satisfiability"); + assert_eq!(resolve_alias("3SAT"), "KSatisfiability"); + assert_eq!(resolve_alias("QUBO"), "QUBO"); + assert_eq!(resolve_alias("MaxCut"), "MaxCut"); + // Pass-through for full names + assert_eq!( + resolve_alias("MaximumIndependentSet"), + "MaximumIndependentSet" + ); + } + + #[test] + fn test_parse_problem_spec_bare() { + let spec = parse_problem_spec("MIS").unwrap(); + assert_eq!(spec.name, "MaximumIndependentSet"); + assert!(spec.variant_values.is_empty()); + } + + #[test] + fn test_parse_problem_spec_with_variants() { + let spec = parse_problem_spec("MIS/UnitDiskGraph").unwrap(); + assert_eq!(spec.name, "MaximumIndependentSet"); + assert_eq!(spec.variant_values, vec!["UnitDiskGraph"]); + } + + #[test] + fn test_parse_problem_spec_two_variants() { + let spec = parse_problem_spec("MIS/SimpleGraph/f64").unwrap(); + assert_eq!(spec.name, "MaximumIndependentSet"); + assert_eq!(spec.variant_values, vec!["SimpleGraph", "f64"]); + } + + #[test] + fn test_parse_problem_spec_3sat_alias() { + let spec = parse_problem_spec("3SAT").unwrap(); + assert_eq!(spec.name, "KSatisfiability"); + assert_eq!(spec.variant_values, vec!["K3"]); + } +} diff --git a/problemreductions-cli/tests/cli_tests.rs b/problemreductions-cli/tests/cli_tests.rs new file mode 100644 index 00000000..06fc5340 --- /dev/null +++ b/problemreductions-cli/tests/cli_tests.rs @@ -0,0 +1,1212 @@ +use std::process::Command; + +fn pred() -> Command { + Command::new(env!("CARGO_BIN_EXE_pred")) +} + +#[test] +fn test_help() { + let output = pred().arg("--help").output().unwrap(); + 
assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Explore NP-hard problem reductions")); +} + +#[test] +fn test_list() { + let output = pred().args(["list"]).output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("MaximumIndependentSet")); + assert!(stdout.contains("QUBO")); +} + +#[test] +fn test_show() { + let output = pred().args(["show", "MIS"]).output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("MaximumIndependentSet")); + assert!(stdout.contains("Reduces to")); +} + +#[test] +fn test_show_variants() { + let output = pred().args(["show", "MIS"]).output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Variants")); +} + +#[test] +fn test_path() { + let output = pred().args(["path", "MIS", "QUBO"]).output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Path")); + assert!(stdout.contains("step")); +} + +#[test] +fn test_path_save() { + let tmp = std::env::temp_dir().join("pred_test_path.json"); + let output = pred() + .args(["path", "MIS", "QUBO", "-o", tmp.to_str().unwrap()]) + .output() + .unwrap(); + assert!(output.status.success()); + assert!(tmp.exists()); + let content = std::fs::read_to_string(&tmp).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert!(json["path"].is_array()); + std::fs::remove_file(&tmp).ok(); +} + +#[test] +fn test_path_all() { + let output = pred() + .args(["path", "MIS", "QUBO", "--all"]) + .output() + .unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Found")); + assert!(stdout.contains("paths from")); +} + 
+#[test] +fn test_path_all_save() { + let dir = std::env::temp_dir().join("pred_test_all_paths"); + let _ = std::fs::remove_dir_all(&dir); + let output = pred() + .args([ + "path", + "MIS", + "QUBO", + "--all", + "-o", + dir.to_str().unwrap(), + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(dir.is_dir()); + let entries: Vec<_> = std::fs::read_dir(&dir).unwrap().collect(); + assert!(entries.len() > 1, "expected multiple path files"); + + // Verify first file is valid JSON + let first = dir.join("path_1.json"); + let content = std::fs::read_to_string(&first).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert!(json["path"].is_array()); + + std::fs::remove_dir_all(&dir).ok(); +} + +#[test] +fn test_export() { + let tmp = std::env::temp_dir().join("pred_test_export.json"); + let output = pred() + .args(["export-graph", tmp.to_str().unwrap()]) + .output() + .unwrap(); + assert!(output.status.success()); + assert!(tmp.exists()); + let content = std::fs::read_to_string(&tmp).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert!(json["nodes"].is_array()); + std::fs::remove_file(&tmp).ok(); +} + +#[test] +fn test_show_includes_fields() { + let output = pred().args(["show", "MIS"]).output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Fields")); + assert!(stdout.contains("graph")); + assert!(stdout.contains("weights")); +} + +#[test] +fn test_list_json() { + let tmp = std::env::temp_dir().join("pred_test_list.json"); + let output = pred() + .args(["--output", tmp.to_str().unwrap(), "list"]) + .output() + .unwrap(); + assert!(output.status.success()); + assert!(tmp.exists()); + let content = std::fs::read_to_string(&tmp).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + 
assert!(json["problems"].is_array()); + std::fs::remove_file(&tmp).ok(); +} + +#[test] +fn test_unknown_problem() { + let output = pred().args(["show", "NonExistent"]).output().unwrap(); + assert!(!output.status.success()); +} + +#[test] +fn test_evaluate() { + let problem_json = r#"{ + "type": "MaximumIndependentSet", + "variant": {"graph": "SimpleGraph", "weight": "i32"}, + "data": { + "graph": {"inner": {"nodes": [null, null, null, null], "node_holes": [], "edge_property": "undirected", "edges": [[0,1,null],[1,2,null],[2,3,null]]}}, + "weights": [1, 1, 1, 1] + } + }"#; + let tmp = std::env::temp_dir().join("pred_test_evaluate.json"); + std::fs::write(&tmp, problem_json).unwrap(); + + let output = pred() + .args(["evaluate", tmp.to_str().unwrap(), "--config", "1,0,1,0"]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Valid")); + std::fs::remove_file(&tmp).ok(); +} + +#[test] +fn test_evaluate_sat() { + let problem_json = r#"{ + "type": "Satisfiability", + "data": { + "num_vars": 3, + "clauses": [{"literals": [1, 2]}] + } + }"#; + let tmp = std::env::temp_dir().join("pred_test_eval_sat.json"); + std::fs::write(&tmp, problem_json).unwrap(); + + let output = pred() + .args(["evaluate", tmp.to_str().unwrap(), "--config", "1,1,0"]) + .output() + .unwrap(); + assert!(output.status.success()); + std::fs::remove_file(&tmp).ok(); +} + +#[test] +fn test_reduce() { + let problem_json = r#"{ + "type": "MIS", + "variant": {"graph": "SimpleGraph", "weight": "i32"}, + "data": { + "graph": {"inner": {"nodes": [null, null, null, null], "node_holes": [], "edge_property": "undirected", "edges": [[0,1,null],[1,2,null],[2,3,null]]}}, + "weights": [1, 1, 1, 1] + } + }"#; + let input = std::env::temp_dir().join("pred_test_reduce_in.json"); + let output_file = std::env::temp_dir().join("pred_test_reduce_out.json"); + 
std::fs::write(&input, problem_json).unwrap(); + + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "reduce", + input.to_str().unwrap(), + "--to", + "QUBO", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(output_file.exists()); + + let content = std::fs::read_to_string(&output_file).unwrap(); + let bundle: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(bundle["source"]["type"], "MaximumIndependentSet"); + assert_eq!(bundle["target"]["type"], "QUBO"); + assert!(bundle["path"].is_array()); + + std::fs::remove_file(&input).ok(); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_reduce_via_path() { + // 1. Create problem + let problem_file = std::env::temp_dir().join("pred_test_reduce_via_in.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2,2-3", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + // 2. Generate path file + let path_file = std::env::temp_dir().join("pred_test_reduce_via_path.json"); + let path_out = pred() + .args([ + "path", + "MIS", + "QUBO", + "-o", + path_file.to_str().unwrap(), + ]) + .output() + .unwrap(); + assert!(path_out.status.success()); + + // 3. 
Reduce via path file + let output_file = std::env::temp_dir().join("pred_test_reduce_via_out.json"); + let reduce_out = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "reduce", + problem_file.to_str().unwrap(), + "--to", + "QUBO", + "--via", + path_file.to_str().unwrap(), + ]) + .output() + .unwrap(); + assert!( + reduce_out.status.success(), + "stderr: {}", + String::from_utf8_lossy(&reduce_out.stderr) + ); + assert!(output_file.exists()); + + let content = std::fs::read_to_string(&output_file).unwrap(); + let bundle: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(bundle["source"]["type"], "MaximumIndependentSet"); + assert_eq!(bundle["target"]["type"], "QUBO"); + + std::fs::remove_file(&problem_file).ok(); + std::fs::remove_file(&path_file).ok(); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_mis() { + let output_file = std::env::temp_dir().join("pred_test_create_mis.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2,2-3", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(output_file.exists()); + + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "MaximumIndependentSet"); + assert!(json["data"].is_object()); + + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_then_evaluate() { + // Create a problem + let problem_file = std::env::temp_dir().join("pred_test_create_eval.json"); + let create_output = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2,2-3", + "--weights", + "1,1,1,1", + ]) + .output() + .unwrap(); + assert!( + create_output.status.success(), + "create stderr: {}", + String::from_utf8_lossy(&create_output.stderr) + ); + + // Evaluate with 
the created problem + let eval_output = pred() + .args([ + "evaluate", + problem_file.to_str().unwrap(), + "--config", + "1,0,1,0", + ]) + .output() + .unwrap(); + assert!( + eval_output.status.success(), + "evaluate stderr: {}", + String::from_utf8_lossy(&eval_output.stderr) + ); + let stdout = String::from_utf8(eval_output.stdout).unwrap(); + assert!(stdout.contains("Valid")); + + std::fs::remove_file(&problem_file).ok(); +} + +#[test] +fn test_create_sat() { + let output_file = std::env::temp_dir().join("pred_test_create_sat.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "SAT", + "--num-vars", + "3", + "--clauses", + "1,2;-1,3", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(output_file.exists()); + + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "Satisfiability"); + + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_qubo() { + let output_file = std::env::temp_dir().join("pred_test_create_qubo.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "QUBO", + "--matrix", + "1,0.5;0.5,2", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(output_file.exists()); + + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "QUBO"); + + std::fs::remove_file(&output_file).ok(); +} + +// ---- Solve command tests ---- + +#[test] +fn test_solve_brute_force() { + // Create a small MIS problem, then solve it + let problem_file = std::env::temp_dir().join("pred_test_solve_bf.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + 
"create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let output = pred() + .args([ + "solve", + problem_file.to_str().unwrap(), + "--solver", + "brute-force", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("brute-force")); + assert!(stdout.contains("Solution")); + + std::fs::remove_file(&problem_file).ok(); +} + +#[test] +fn test_solve_ilp() { + let problem_file = std::env::temp_dir().join("pred_test_solve_ilp.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let output = pred() + .args(["solve", problem_file.to_str().unwrap(), "--solver", "ilp"]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("ilp")); + assert!(stdout.contains("Solution")); + + std::fs::remove_file(&problem_file).ok(); +} + +#[test] +fn test_solve_ilp_default() { + // Default solver is ilp + let problem_file = std::env::temp_dir().join("pred_test_solve_default.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let output = pred() + .args(["solve", problem_file.to_str().unwrap()]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("reduced to ILP")); + + std::fs::remove_file(&problem_file).ok(); +} + +#[test] +fn test_solve_json_output() { + let 
problem_file = std::env::temp_dir().join("pred_test_solve_json_in.json"); + let result_file = std::env::temp_dir().join("pred_test_solve_json_out.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let output = pred() + .args([ + "-o", + result_file.to_str().unwrap(), + "solve", + problem_file.to_str().unwrap(), + "--solver", + "brute-force", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(result_file.exists()); + + let content = std::fs::read_to_string(&result_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert!(json["solution"].is_array()); + assert_eq!(json["solver"], "brute-force"); + + std::fs::remove_file(&problem_file).ok(); + std::fs::remove_file(&result_file).ok(); +} + +#[test] +fn test_solve_bundle() { + // Create → Reduce → Solve bundle + let problem_file = std::env::temp_dir().join("pred_test_solve_bundle_in.json"); + let bundle_file = std::env::temp_dir().join("pred_test_solve_bundle.json"); + + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let reduce_out = pred() + .args([ + "-o", + bundle_file.to_str().unwrap(), + "reduce", + problem_file.to_str().unwrap(), + "--to", + "QUBO", + ]) + .output() + .unwrap(); + assert!( + reduce_out.status.success(), + "reduce stderr: {}", + String::from_utf8_lossy(&reduce_out.stderr) + ); + + // Solve the bundle with brute-force + let output = pred() + .args([ + "solve", + bundle_file.to_str().unwrap(), + "--solver", + "brute-force", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = 
String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Source")); + assert!(stdout.contains("Target")); + + std::fs::remove_file(&problem_file).ok(); + std::fs::remove_file(&bundle_file).ok(); +} + +#[test] +fn test_solve_bundle_ilp() { + // Create → Reduce → Solve bundle with ILP + // Use MVC as target since it has an ILP reduction path (QUBO does not) + let problem_file = std::env::temp_dir().join("pred_test_solve_bundle_ilp_in.json"); + let bundle_file = std::env::temp_dir().join("pred_test_solve_bundle_ilp.json"); + + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let reduce_out = pred() + .args([ + "-o", + bundle_file.to_str().unwrap(), + "reduce", + problem_file.to_str().unwrap(), + "--to", + "MVC", + ]) + .output() + .unwrap(); + assert!( + reduce_out.status.success(), + "reduce stderr: {}", + String::from_utf8_lossy(&reduce_out.stderr) + ); + + let output = pred() + .args([ + "solve", + bundle_file.to_str().unwrap(), + "--solver", + "ilp", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Source")); + assert!(stdout.contains("Target")); + + std::fs::remove_file(&problem_file).ok(); + std::fs::remove_file(&bundle_file).ok(); +} + +#[test] +fn test_solve_unknown_solver() { + let problem_file = std::env::temp_dir().join("pred_test_solve_unknown.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let output = pred() + .args([ + "solve", + problem_file.to_str().unwrap(), + "--solver", + "unknown-solver", + ]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = 
String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Unknown solver")); + + std::fs::remove_file(&problem_file).ok(); +} + +// ---- Create command: more problem types ---- + +#[test] +fn test_create_maxcut() { + let output_file = std::env::temp_dir().join("pred_test_create_maxcut.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "MaxCut", + "--edges", + "0-1,1-2,2-0", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "MaxCut"); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_mvc() { + let output_file = std::env::temp_dir().join("pred_test_create_mvc.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "MVC", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "MinimumVertexCover"); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_kcoloring() { + let output_file = std::env::temp_dir().join("pred_test_create_kcol.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "KColoring", + "--edges", + "0-1,1-2,2-0", + "--k", + "3", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "KColoring"); + std::fs::remove_file(&output_file).ok(); +} + 
+#[test] +fn test_create_spinglass() { + let output_file = std::env::temp_dir().join("pred_test_create_sg.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "SpinGlass", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "SpinGlass"); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_3sat() { + let output_file = std::env::temp_dir().join("pred_test_create_3sat.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "3SAT", + "--num-vars", + "3", + "--clauses", + "1,2,3;-1,2,-3", + "--k", + "3", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "KSatisfiability"); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_maximum_matching() { + let output_file = std::env::temp_dir().join("pred_test_create_mm.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "MaximumMatching", + "--edges", + "0-1,1-2,2-3", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "MaximumMatching"); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_with_edge_weights() { + let output_file = std::env::temp_dir().join("pred_test_create_ew.json"); + let output = 
pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "MaxCut", + "--edges", + "0-1,1-2,2-0", + "--weights", + "2,3,1", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_without_output() { + // Create without -o prints to stdout + let output = pred() + .args(["create", "MIS", "--edges", "0-1,1-2"]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Created")); +} + +// ---- Error cases ---- + +#[test] +fn test_create_unknown_problem() { + let output = pred() + .args(["create", "NonExistent", "--edges", "0-1"]) + .output() + .unwrap(); + assert!(!output.status.success()); +} + +#[test] +fn test_create_missing_edges() { + let output = pred().args(["create", "MIS"]).output().unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("--edges")); +} + +#[test] +fn test_create_kcoloring_missing_k() { + let output = pred() + .args(["create", "KColoring", "--edges", "0-1,1-2"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("--k")); +} + +#[test] +fn test_evaluate_wrong_config_length() { + let problem_file = std::env::temp_dir().join("pred_test_eval_wrong_len.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let output = pred() + .args([ + "evaluate", + problem_file.to_str().unwrap(), + "--config", + "1,0", + ]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); 
+ assert!(stderr.contains("variables")); + + std::fs::remove_file(&problem_file).ok(); +} + +#[test] +fn test_evaluate_json_output() { + let problem_file = std::env::temp_dir().join("pred_test_eval_json_in.json"); + let result_file = std::env::temp_dir().join("pred_test_eval_json_out.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let output = pred() + .args([ + "-o", + result_file.to_str().unwrap(), + "evaluate", + problem_file.to_str().unwrap(), + "--config", + "1,0,1", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(result_file.exists()); + let content = std::fs::read_to_string(&result_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert!(json["config"].is_array()); + + std::fs::remove_file(&problem_file).ok(); + std::fs::remove_file(&result_file).ok(); +} + +#[test] +fn test_path_unknown_source() { + let output = pred() + .args(["path", "NonExistent", "QUBO"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Unknown source")); +} + +#[test] +fn test_path_unknown_target() { + let output = pred() + .args(["path", "MIS", "NonExistent"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Unknown target")); +} + +#[test] +fn test_path_with_cost_minimize_field() { + let output = pred() + .args([ + "path", + "MIS", + "QUBO", + "--cost", + "minimize:num_variables", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Path")); +} + 
+#[test] +fn test_path_unknown_cost() { + let output = pred() + .args(["path", "MIS", "QUBO", "--cost", "bad-cost"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("Unknown cost function")); +} + +#[test] +fn test_show_json_output() { + let tmp = std::env::temp_dir().join("pred_test_show.json"); + let output = pred() + .args(["-o", tmp.to_str().unwrap(), "show", "MIS"]) + .output() + .unwrap(); + assert!(output.status.success()); + assert!(tmp.exists()); + let content = std::fs::read_to_string(&tmp).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["name"], "MaximumIndependentSet"); + assert!(json["variants"].is_array()); + assert!(json["reduces_to"].is_array()); + std::fs::remove_file(&tmp).ok(); +} + +#[test] +fn test_show_size_fields() { + let output = pred().args(["show", "MIS"]).output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Size fields")); +} + +#[test] +fn test_reduce_unknown_target() { + let problem_file = std::env::temp_dir().join("pred_test_reduce_unknown.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + let output = pred() + .args([ + "reduce", + problem_file.to_str().unwrap(), + "--to", + "NonExistent", + ]) + .output() + .unwrap(); + assert!(!output.status.success()); + + std::fs::remove_file(&problem_file).ok(); +} + +#[test] +fn test_reduce_stdout() { + // Reduce without -o prints to stdout + let problem_file = std::env::temp_dir().join("pred_test_reduce_stdout.json"); + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "MIS", + "--edges", + "0-1,1-2", + ]) + .output() + .unwrap(); + assert!(create_out.status.success()); + + 
let output = pred() + .args([ + "reduce", + problem_file.to_str().unwrap(), + "--to", + "QUBO", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert!(json["source"].is_object()); + assert!(json["target"].is_object()); + + std::fs::remove_file(&problem_file).ok(); +} + +// ---- Help message tests ---- + +#[test] +fn test_incorrect_command_shows_help() { + // Missing required arguments should show after_help + let output = pred() + .args(["solve"]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + // The subcommand help hint should be shown + assert!( + stderr.contains("pred create") || stderr.contains("pred solve") || stderr.contains("Usage"), + "stderr should contain help: {stderr}" + ); +} + +#[test] +fn test_subcommand_help() { + let output = pred().args(["solve", "--help"]).output().unwrap(); + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("brute-force")); + assert!(stdout.contains("pred create")); +} diff --git a/src/rules/graph.rs b/src/rules/graph.rs index de998a21..b3d3405e 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -11,7 +11,6 @@ use crate::rules::cost::PathCostFn; use crate::rules::registry::{ReductionEntry, ReductionOverhead}; use crate::rules::traits::DynReductionResult; -use crate::traits::Problem; use crate::types::ProblemSize; use ordered_float::OrderedFloat; use petgraph::algo::all_simple_paths; @@ -21,7 +20,17 @@ use serde::Serialize; use std::any::Any; use std::cmp::Reverse; use std::collections::{BTreeMap, BinaryHeap, HashMap, HashSet}; -use std::marker::PhantomData; + + +/// A source/target pair from the reduction graph, returned by +/// [`ReductionGraph::outgoing_reductions`] 
and [`ReductionGraph::incoming_reductions`]. +#[derive(Debug, Clone)] +pub struct ReductionEdgeInfo { + pub source_name: &'static str, + pub source_variant: BTreeMap, + pub target_name: &'static str, + pub target_variant: BTreeMap, +} /// Internal edge data combining overhead and executable reduce function. #[derive(Clone)] @@ -623,6 +632,80 @@ impl ReductionGraph { .unwrap_or_default() } + /// Get all variant maps registered for a problem name. + /// + /// Returns an empty `Vec` if the name is not found. + pub fn variants_for(&self, name: &str) -> Vec> { + self.name_to_nodes + .get(name) + .map(|indices| { + indices + .iter() + .map(|&idx| self.nodes[self.graph[idx]].variant.clone()) + .collect() + }) + .unwrap_or_default() + } + + /// Get all outgoing reductions from a problem (across all its variants). + pub fn outgoing_reductions(&self, name: &str) -> Vec { + let Some(indices) = self.name_to_nodes.get(name) else { + return vec![]; + }; + let index_set: HashSet = indices.iter().copied().collect(); + self.graph + .edge_references() + .filter(|e| index_set.contains(&e.source())) + .map(|e| { + let src = &self.nodes[self.graph[e.source()]]; + let dst = &self.nodes[self.graph[e.target()]]; + ReductionEdgeInfo { + source_name: src.name, + source_variant: src.variant.clone(), + target_name: dst.name, + target_variant: dst.variant.clone(), + } + }) + .collect() + } + + /// Get the problem size field names for a problem type. + /// + /// Returns the static `problem_size_names()` by finding a reduction entry + /// where this problem is the source or target. + pub fn size_field_names(&self, name: &str) -> &'static [&'static str] { + for entry in inventory::iter:: { + if entry.source_name == name { + return (entry.source_size_names_fn)(); + } + if entry.target_name == name { + return (entry.target_size_names_fn)(); + } + } + &[] + } + + /// Get all incoming reductions to a problem (across all its variants). 
+ pub fn incoming_reductions(&self, name: &str) -> Vec { + let Some(indices) = self.name_to_nodes.get(name) else { + return vec![]; + }; + let index_set: HashSet = indices.iter().copied().collect(); + self.graph + .edge_references() + .filter(|e| index_set.contains(&e.target())) + .map(|e| { + let src = &self.nodes[self.graph[e.source()]]; + let dst = &self.nodes[self.graph[e.target()]]; + ReductionEdgeInfo { + source_name: src.name, + source_variant: src.variant.clone(), + target_name: dst.name, + target_variant: dst.variant.clone(), + } + }) + .collect() + } } impl Default for ReductionGraph { @@ -855,63 +938,34 @@ pub struct MatchedEntry { pub overhead: ReductionOverhead, } -/// Type alias for a dynamically-dispatched reduce function pointer. -type ReduceFn = fn(&dyn Any) -> Box; - -/// A typed, executable reduction path from source type `S` to target type `T`. -pub struct ExecutablePath { - edge_fns: Vec, - _phantom: PhantomData<(S, T)>, -} - -impl ExecutablePath { - /// Execute the reduction path on a source problem instance. - pub fn reduce(&self, source: &S) -> ChainedReduction { - let mut steps: Vec> = Vec::new(); - let step = (self.edge_fns[0])(source as &dyn Any); - steps.push(step); - for edge_fn in &self.edge_fns[1..] { - let step = { - let prev_target = steps.last().unwrap().target_problem_any(); - edge_fn(prev_target) - }; - steps.push(step); - } - ChainedReduction { - steps, - _phantom: PhantomData, - } - } - - /// Number of reduction steps in the path. - pub fn len(&self) -> usize { - self.edge_fns.len() - } - - /// Whether the path is empty (zero steps). - pub fn is_empty(&self) -> bool { - self.edge_fns.is_empty() - } -} - -/// A composed reduction from source type `S` to target type `T`. -pub struct ChainedReduction { +/// A composed reduction chain produced by [`ReductionGraph::reduce_along_path`]. +/// +/// Holds the intermediate reduction results from executing a multi-step +/// reduction path. 
Provides access to the final target problem and +/// solution extraction back to the source problem space. +pub struct ReductionChain { steps: Vec>, - _phantom: PhantomData<(S, T)>, } -impl ChainedReduction { - /// Get a reference to the target problem. - pub fn target_problem(&self) -> &T { +impl ReductionChain { + /// Get the final target problem as a type-erased reference. + pub fn target_problem_any(&self) -> &dyn Any { self.steps .last() - .expect("ChainedReduction has no steps") + .expect("ReductionChain has no steps") .target_problem_any() + } + + /// Get a typed reference to the final target problem. + /// + /// Panics if the actual target type does not match `T`. + pub fn target_problem(&self) -> &T { + self.target_problem_any() .downcast_ref::() - .expect("final step target type mismatch") + .expect("ReductionChain target type mismatch") } - /// Extract a solution from target space to source space. + /// Extract a solution from target space back to source space. pub fn extract_solution(&self, target_solution: &[usize]) -> Vec { self.steps .iter() @@ -923,16 +977,28 @@ impl ChainedReduction { } impl ReductionGraph { - /// Convert a `ReductionPath` into a typed, executable path. + /// Execute a reduction path on a source problem instance. + /// + /// Looks up each edge's `reduce_fn`, chains them, and returns the + /// resulting [`ReductionChain`]. The source must be passed as `&dyn Any` + /// (use `&problem as &dyn Any` or pass a concrete reference directly). + /// + /// # Example /// - /// Looks up each edge's `reduce_fn` from the graph along the path steps. 
- pub fn make_executable( + /// ```text + /// let chain = graph.reduce_along_path(&path, &source_problem)?; + /// let target: &QUBO = chain.target_problem(); + /// let source_solution = chain.extract_solution(&target_solution); + /// ``` + pub fn reduce_along_path( &self, path: &ReductionPath, - ) -> Option> { + source: &dyn Any, + ) -> Option { if path.steps.len() < 2 { return None; } + // Collect edge reduce_fns let mut edge_fns = Vec::new(); for window in path.steps.windows(2) { let src = self.lookup_node(&window[0].name, &window[0].variant)?; @@ -940,10 +1006,18 @@ impl ReductionGraph { let edge_idx = self.graph.find_edge(src, dst)?; edge_fns.push(self.graph[edge_idx].reduce_fn); } - Some(ExecutablePath { - edge_fns, - _phantom: PhantomData, - }) + // Execute the chain + let mut steps: Vec> = Vec::new(); + let step = (edge_fns[0])(source); + steps.push(step); + for edge_fn in &edge_fns[1..] { + let step = { + let prev_target = steps.last().unwrap().target_problem_any(); + edge_fn(prev_target) + }; + steps.push(step); + } + Some(ReductionChain { steps }) } } diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 97f1bf71..678cd0a4 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -35,31 +35,31 @@ mod traits; pub mod unitdiskmapping; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod coloring_ilp; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod factoring_ilp; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod ilp_qubo; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod maximumclique_ilp; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod maximumindependentset_ilp; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod maximummatching_ilp; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod maximumsetpacking_ilp; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod minimumdominatingset_ilp; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod minimumsetcovering_ilp; 
-#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod minimumvertexcover_ilp; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] mod travelingsalesman_ilp; pub use graph::{ - ChainedReduction, ExecutablePath, ReductionGraph, ReductionPath, ReductionStep, + ReductionChain, ReductionEdgeInfo, ReductionGraph, ReductionPath, ReductionStep, }; #[cfg(test)] pub(crate) use graph::validate_overhead_variables; diff --git a/src/rules/traits.rs b/src/rules/traits.rs index e5ec2ab4..c4b67235 100644 --- a/src/rules/traits.rs +++ b/src/rules/traits.rs @@ -100,7 +100,7 @@ impl ReductionResult for ReductionAutoCast { /// Type-erased reduction result for runtime-discovered paths. /// /// Implemented automatically for all `ReductionResult` types via blanket impl. -/// Used internally by `ExecutablePath` and `ChainedReduction`. +/// Used internally by `ReductionChain`. pub trait DynReductionResult { /// Get the target problem as a type-erased reference. fn target_problem_any(&self) -> &dyn Any; diff --git a/src/solvers/mod.rs b/src/solvers/mod.rs index 5d5bf0a7..894bc820 100644 --- a/src/solvers/mod.rs +++ b/src/solvers/mod.rs @@ -2,12 +2,12 @@ mod brute_force; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] pub mod ilp; pub use brute_force::BruteForce; -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] pub use ilp::ILPSolver; use crate::traits::{OptimizationProblem, Problem}; diff --git a/src/unit_tests/rules/graph.rs b/src/unit_tests/rules/graph.rs index fa51c9a9..41c1bf98 100644 --- a/src/unit_tests/rules/graph.rs +++ b/src/unit_tests/rules/graph.rs @@ -793,7 +793,7 @@ fn test_classify_problem_category() { } #[test] -fn test_make_executable_direct() { +fn test_reduce_along_path_direct() { let graph = ReductionGraph::new(); let src = ReductionGraph::variant_to_map(&MaximumIndependentSet::::variant()); let dst = ReductionGraph::variant_to_map(&MinimumVertexCover::::variant()); @@ -807,15 +807,17 @@ fn test_make_executable_direct() { &MinimizeSteps, ) 
.unwrap(); - let path = graph.make_executable::< - MaximumIndependentSet, - MinimumVertexCover, - >(&rpath); - assert!(path.is_some()); + // Just verify the path can produce a chain with a dummy source + let source = MaximumIndependentSet::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1i32; 4], + ); + let chain = graph.reduce_along_path(&rpath, &source as &dyn std::any::Any); + assert!(chain.is_some()); } #[test] -fn test_chained_reduction_direct() { +fn test_reduction_chain_direct() { use crate::solvers::{BruteForce, Solver}; use crate::traits::Problem; @@ -832,29 +834,25 @@ fn test_chained_reduction_direct() { &MinimizeSteps, ) .unwrap(); - let path = graph - .make_executable::< - MaximumIndependentSet, - MinimumVertexCover, - >(&rpath) - .unwrap(); let problem = MaximumIndependentSet::new( SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), vec![1i32; 4], ); - let reduction = path.reduce(&problem); - let target = reduction.target_problem(); + let chain = graph + .reduce_along_path(&rpath, &problem as &dyn std::any::Any) + .unwrap(); + let target: &MinimumVertexCover = chain.target_problem(); let solver = BruteForce::new(); let target_solution = solver.find_best(target).unwrap(); - let source_solution = reduction.extract_solution(&target_solution); + let source_solution = chain.extract_solution(&target_solution); let metric = problem.evaluate(&source_solution); assert!(metric.is_valid()); } #[test] -fn test_chained_reduction_multi_step() { +fn test_reduction_chain_multi_step() { use crate::solvers::{BruteForce, Solver}; use crate::traits::Problem; @@ -871,26 +869,25 @@ fn test_chained_reduction_multi_step() { &MinimizeSteps, ) .unwrap(); - let path = graph - .make_executable::, MaximumSetPacking>(&rpath) - .unwrap(); let problem = MaximumIndependentSet::new( SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), vec![1i32; 4], ); - let reduction = path.reduce(&problem); - let target = reduction.target_problem(); + let chain = graph + 
.reduce_along_path(&rpath, &problem as &dyn std::any::Any) + .unwrap(); + let target: &MaximumSetPacking = chain.target_problem(); let solver = BruteForce::new(); let target_solution = solver.find_best(target).unwrap(); - let source_solution = reduction.extract_solution(&target_solution); + let source_solution = chain.extract_solution(&target_solution); let metric = problem.evaluate(&source_solution); assert!(metric.is_valid()); } #[test] -fn test_chained_reduction_with_variant_casts() { +fn test_reduction_chain_with_variant_casts() { use crate::models::satisfiability::{CNFClause, KSatisfiability}; use crate::rules::MinimizeSteps; use crate::solvers::{BruteForce, Solver}; @@ -924,23 +921,18 @@ fn test_chained_reduction_with_variant_casts() { "Path should cross variant cast boundary (at least 2 steps)" ); - let path = graph - .make_executable::< - MaximumIndependentSet, - MinimumVertexCover, - >(&rpath) - .unwrap(); - // Create a small UnitDiskGraph MIS problem (triangle of close nodes) let udg = UnitDiskGraph::new(vec![(0.0, 0.0), (0.5, 0.0), (0.25, 0.4)], 1.0); let mis = MaximumIndependentSet::new(udg, vec![1i32, 1, 1]); - let reduction = path.reduce(&mis); - let target = reduction.target_problem(); + let chain = graph + .reduce_along_path(&rpath, &mis as &dyn std::any::Any) + .unwrap(); + let target: &MinimumVertexCover = chain.target_problem(); let solver = BruteForce::new(); let target_solution = solver.find_best(target).unwrap(); - let source_solution = reduction.extract_solution(&target_solution); + let source_solution = chain.extract_solution(&target_solution); let metric = mis.evaluate(&source_solution); assert!(metric.is_valid()); @@ -964,12 +956,6 @@ fn test_chained_reduction_with_variant_casts() { "Should find path from KSat to MIS" ); let ksat_rpath = ksat_rpath.unwrap(); - let ksat_path = graph - .make_executable::< - KSatisfiability, - MaximumIndependentSet, - >(&ksat_rpath) - .unwrap(); // Create a 3-SAT formula let ksat = KSatisfiability::::new( @@ 
-982,11 +968,13 @@ fn test_chained_reduction_with_variant_casts() { ], ); - let reduction = ksat_path.reduce(&ksat); - let target = reduction.target_problem(); + let ksat_chain = graph + .reduce_along_path(&ksat_rpath, &ksat as &dyn std::any::Any) + .unwrap(); + let target: &MaximumIndependentSet = ksat_chain.target_problem(); let target_solution = solver.find_best(target).unwrap(); - let original_solution = reduction.extract_solution(&target_solution); + let original_solution = ksat_chain.extract_solution(&target_solution); // Verify the extracted solution satisfies the original 3-SAT formula assert!(ksat.evaluate(&original_solution)); diff --git a/src/unit_tests/rules/reduction_path_parity.rs b/src/unit_tests/rules/reduction_path_parity.rs index 6899ee57..7ca5ca44 100644 --- a/src/unit_tests/rules/reduction_path_parity.rs +++ b/src/unit_tests/rules/reduction_path_parity.rs @@ -1,5 +1,5 @@ //! Reduction path parity tests — mirrors Julia's test/reduction_path.jl. -//! Verifies that chained reductions via `find_cheapest_path` + `make_executable` +//! Verifies that chained reductions via `find_cheapest_path` + `reduce_along_path` //! produce correct solutions matching direct source solves. 
use crate::models::graph::MaxCut; @@ -29,9 +29,6 @@ fn test_jl_parity_maxcut_to_spinglass_path() { &MinimizeSteps, ) .expect("Should find path MaxCut -> SpinGlass"); - let path = graph - .make_executable::, SpinGlass>(&rpath) - .expect("Should make executable path"); // Petersen graph: 10 vertices, 15 edges let petersen_edges = vec![ @@ -52,15 +49,17 @@ fn test_jl_parity_maxcut_to_spinglass_path() { (7, 9), ]; let source = MaxCut::::unweighted(SimpleGraph::new(10, petersen_edges)); - let reduction = path.reduce(&source); - let target = reduction.target_problem(); + let chain = graph + .reduce_along_path(&rpath, &source as &dyn std::any::Any) + .expect("Should reduce along path"); + let target: &SpinGlass = chain.target_problem(); // Verify target is SpinGlass assert_eq!(SpinGlass::::NAME, "SpinGlass"); let solver = BruteForce::new(); let target_solution = solver.find_best(target).unwrap(); - let source_solution = reduction.extract_solution(&target_solution); + let source_solution = chain.extract_solution(&target_solution); // Source solution should be valid let metric = source.evaluate(&source_solution); @@ -84,9 +83,6 @@ fn test_jl_parity_maxcut_to_qubo_path() { &MinimizeSteps, ) .expect("Should find path MaxCut -> QUBO"); - let path = graph - .make_executable::, QUBO>(&rpath) - .expect("Should make executable path"); // Use a small graph for brute-force feasibility let petersen_edges = vec![ @@ -107,16 +103,18 @@ fn test_jl_parity_maxcut_to_qubo_path() { (7, 9), ]; let source = MaxCut::::unweighted(SimpleGraph::new(10, petersen_edges)); - let reduction = path.reduce(&source); + let chain = graph + .reduce_along_path(&rpath, &source as &dyn std::any::Any) + .expect("Should reduce along path"); let solver = BruteForce::new(); let best_source: HashSet> = solver.find_all_best(&source).into_iter().collect(); - let best_target = solver.find_all_best(reduction.target_problem()); + let best_target = solver.find_all_best(chain.target_problem::>()); // Julia: 
sort(extract_solution.(Ref(res), best2)) == sort(best1) let extracted: HashSet> = best_target .iter() - .map(|t| reduction.extract_solution(t)) + .map(|t| chain.extract_solution(t)) .collect(); assert_eq!( extracted, best_source, @@ -127,7 +125,7 @@ fn test_jl_parity_maxcut_to_qubo_path() { /// Julia: factoring = Factoring(2, 1, 3) /// Julia: paths = reduction_paths(Factoring, SpinGlass) /// Julia: all(solution_size.(Ref(factoring), extract_solution.(Ref(res), sol)) .== Ref(SolutionSize(0, true))) -#[cfg(feature = "ilp")] +#[cfg(feature = "ilp-solver")] #[test] fn test_jl_parity_factoring_to_spinglass_path() { use crate::solvers::ILPSolver; @@ -145,14 +143,13 @@ fn test_jl_parity_factoring_to_spinglass_path() { &MinimizeSteps, ) .expect("Should find path Factoring -> SpinGlass"); - let path = graph - .make_executable::>(&rpath) - .expect("Should make executable path"); // Julia: Factoring(2, 1, 3) — factor 3 with 2-bit x 1-bit let factoring = Factoring::new(2, 1, 3); - let reduction = path.reduce(&factoring); - let target = reduction.target_problem(); + let chain = graph + .reduce_along_path(&rpath, &factoring as &dyn std::any::Any) + .expect("Should reduce along path"); + let target: &SpinGlass = chain.target_problem(); // Verify reduction produces a valid SpinGlass problem assert!(target.num_variables() > 0, "SpinGlass should have variables"); diff --git a/tests/suites/reductions.rs b/tests/suites/reductions.rs index 314c8def..19fd2ec1 100644 --- a/tests/suites/reductions.rs +++ b/tests/suites/reductions.rs @@ -664,7 +664,7 @@ mod qubo_reductions { assert_eq!(&our_config, gt_config); } - #[cfg(feature = "ilp")] + #[cfg(feature = "ilp-solver")] #[derive(Deserialize)] struct ILPToQuboData { source: ILPSource, @@ -672,7 +672,7 @@ mod qubo_reductions { qubo_optimal: QuboOptimal, } - #[cfg(feature = "ilp")] + #[cfg(feature = "ilp-solver")] #[derive(Deserialize)] struct ILPSource { num_variables: usize, @@ -682,7 +682,7 @@ mod qubo_reductions { constraint_signs: Vec, } 
- #[cfg(feature = "ilp")] + #[cfg(feature = "ilp-solver")] #[test] fn test_ilp_to_qubo_ground_truth() { let json = std::fs::read_to_string("tests/data/qubo/ilp_to_qubo.json").unwrap();