diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 9fcc75eb..3a1f13b6 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -139,6 +139,7 @@ "ConsecutiveBlockMinimization": [Consecutive Block Minimization], "ConsecutiveOnesSubmatrix": [Consecutive Ones Submatrix], "SequencingToMinimizeMaximumCumulativeCost": [Sequencing to Minimize Maximum Cumulative Cost], + "SequencingToMinimizeWeightedCompletionTime": [Sequencing to Minimize Weighted Completion Time], "SequencingToMinimizeWeightedTardiness": [Sequencing to Minimize Weighted Tardiness], "SequencingWithinIntervals": [Sequencing Within Intervals], "SumOfSquaresPartition": [Sum of Squares Partition], @@ -3716,6 +3717,72 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], ] } +#{ + let x = load-model-example("SequencingToMinimizeWeightedCompletionTime") + let lengths = x.instance.lengths + let weights = x.instance.weights + let precs = x.instance.precedences + let ntasks = lengths.len() + let sol = x.optimal.at(0) + let opt = sol.metric.Valid + let lehmer = sol.config + let schedule = { + let avail = range(ntasks) + let result = () + for c in lehmer { + result.push(avail.at(c)) + avail = avail.enumerate().filter(((i, v)) => i != c).map(((i, v)) => v) + } + result + } + let starts = () + let finishes = () + let elapsed = 0 + for task in schedule { + starts.push(elapsed) + elapsed += lengths.at(task) + finishes.push(elapsed) + } + let total-time = elapsed + [ + #problem-def("SequencingToMinimizeWeightedCompletionTime")[ + Given a set $T$ of $n$ tasks, a processing-time function $l: T -> ZZ^+$, a weight function $w: T -> ZZ^+$, and a partial order $prec.eq$ on $T$, find a one-machine schedule minimizing $sum_(t in T) w(t) C(t)$, where $C(t)$ is the completion time of task $t$ and every precedence relation $t_i prec.eq t_j$ requires task $t_i$ to complete before task $t_j$ starts. 
+ ][ + Sequencing to Minimize Weighted Completion Time is the single-machine precedence-constrained scheduling problem catalogued as SS4 in Garey & Johnson @garey1979, usually written $1 | "prec" | sum w_j C_j$. Lawler showed that arbitrary precedence constraints make the problem NP-complete, while series-parallel precedence orders admit an $O(n log n)$ algorithm @lawler1978. Without precedence constraints, Smith's ratio rule orders jobs by non-increasing $w_j / l_j$ and is optimal @smith1956. + + *Example.* Consider tasks with lengths $l = (#lengths.map(v => str(v)).join(", "))$, weights $w = (#weights.map(v => str(v)).join(", "))$, and precedence constraints #{precs.map(p => [$t_#(p.at(0)) prec.eq t_#(p.at(1))$]).join(", ")}. An optimal schedule is $(#schedule.map(t => $t_#t$).join(", "))$, with completion times $(#finishes.map(v => str(v)).join(", "))$ along the machine timeline and objective value $#opt$. + + #figure( + canvas(length: 1cm, { + import draw: * + let colors = (rgb("#4e79a7"), rgb("#e15759"), rgb("#76b7b2"), rgb("#f28e2b"), rgb("#59a14f")) + let scale = 0.55 + let row-h = 0.7 + + for (pos, task) in schedule.enumerate() { + let x0 = starts.at(pos) * scale + let x1 = finishes.at(pos) * scale + let color = colors.at(calc.rem(task, colors.len())) + rect((x0, -row-h / 2), (x1, row-h / 2), + fill: color.transparentize(30%), stroke: 0.4pt + color) + content(((x0 + x1) / 2, 0), text(7pt, $t_#task$)) + } + + let y-axis = -row-h / 2 - 0.22 + line((0, y-axis), (total-time * scale, y-axis), stroke: 0.4pt) + for t in range(total-time + 1) { + let x = t * scale + line((x, y-axis), (x, y-axis - 0.08), stroke: 0.4pt) + content((x, y-axis - 0.22), text(6pt, str(t))) + } + content((total-time * scale / 2, y-axis - 0.45), text(7pt)[time]) + }), + caption: [Optimal single-machine schedule for the canonical weighted-completion-time instance. 
Each block width equals the processing time $l_j$.], + ) + ] + ] +} + #{ let x = load-model-example("SequencingToMinimizeWeightedTardiness") let lengths = x.instance.lengths @@ -4898,6 +4965,28 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ For each item $i$, find the unique $j$ with $x_(i j) = 1$; assign item $i$ to bin $j$. ] +#reduction-rule("SequencingToMinimizeWeightedCompletionTime", "ILP")[ + Completion times are natural integer variables, precedence constraints compare those completion times directly, and one binary order variable per task pair enforces that a single machine cannot overlap two jobs. +][ + _Construction._ For each task $j$, introduce an integer completion-time variable $C_j$. For each unordered pair $i < j$, introduce a binary order variable $y_(i j)$ with $y_(i j) = 1$ meaning task $i$ finishes before task $j$. Let $M = sum_h l_h$. + + _Bounds._ $l_j <= C_j <= M$ for every task $j$, and $y_(i j) in {0, 1}$. + + _Precedence constraints._ If $i prec.eq j$, require $C_j - C_i >= l_j$. + + _Single-machine disjunction._ For every pair $i < j$, require + $C_j - C_i + M (1 - y_(i j)) >= l_j$ + and + $C_i - C_j + M y_(i j) >= l_i$. + Exactly one of the two orderings is therefore active. + + _Objective._ Minimize $sum_j w_j C_j$. + + _Correctness._ ($arrow.r.double$) Any feasible schedule defines completion times and pairwise order values satisfying the bounds, precedence inequalities, and disjunctive machine constraints; its weighted completion time is exactly the ILP objective. ($arrow.l.double$) Any feasible ILP solution assigns a strict order to every task pair and forbids overlap, so the completion times correspond to a valid single-machine schedule that respects all precedences. Minimizing the ILP objective therefore minimizes the original weighted completion-time objective. 
+ + _Solution extraction._ Sort tasks by their completion times $C_j$ and encode that order back into the source schedule representation. +] + #reduction-rule("TravelingSalesman", "ILP", example: true, example-caption: [Weighted $K_4$: the optimal tour $0 arrow 1 arrow 3 arrow 2 arrow 0$ with cost 80 is found by position-based ILP.], diff --git a/docs/paper/references.bib b/docs/paper/references.bib index 04538c10..87db34ee 100644 --- a/docs/paper/references.bib +++ b/docs/paper/references.bib @@ -39,6 +39,27 @@ @article{moore1968 doi = {10.1287/mnsc.15.1.102} } +@article{lawler1978, + author = {Eugene L. Lawler}, + title = {Sequencing Jobs to Minimize Total Weighted Completion Time Subject to Precedence Constraints}, + journal = {Annals of Discrete Mathematics}, + volume = {2}, + pages = {75--90}, + year = {1978}, + doi = {10.1016/S0167-5060(08)70356-7} +} + +@article{smith1956, + author = {W. E. Smith}, + title = {Various Optimizers for Single-Stage Production}, + journal = {Naval Research Logistics Quarterly}, + volume = {3}, + number = {1--2}, + pages = {59--66}, + year = {1956}, + doi = {10.1002/nav.3800030106} +} + @article{johnson1954, author = {Selmer M. 
Johnson}, title = {Optimal two- and three-stage production schedules with setup times included}, diff --git a/docs/src/cli.md b/docs/src/cli.md index 38fe48f8..80aa6ff6 100644 --- a/docs/src/cli.md +++ b/docs/src/cli.md @@ -366,6 +366,7 @@ pred create MinimumCardinalityKey --num-attributes 6 --dependencies "0,1>2;0,2>3 pred create MinimumTardinessSequencing --n 5 --deadlines 5,5,5,3,3 --precedence-pairs "0>3,1>3,1>4,2>4" -o mts.json pred create SchedulingWithIndividualDeadlines --n 7 --deadlines 2,1,2,2,3,3,2 --num-processors 3 --precedence-pairs "0>3,1>3,1>4,2>4,2>5" -o swid.json pred solve swid.json --solver brute-force +pred create SequencingToMinimizeWeightedCompletionTime --lengths 2,1,3,1,2 --weights 3,5,1,4,2 --precedence-pairs "0>2,1>4" -o stmwct.json pred create StringToStringCorrection --source-string "0,1,2,3,1,0" --target-string "0,1,3,2,1" --bound 2 | pred solve - --solver brute-force pred create StrongConnectivityAugmentation --arcs "0>1,1>2,2>0,3>4,4>3,2>3,4>5,5>3" --candidate-arcs "3>0:5,3>1:3,3>2:4,4>0:6,4>1:2,4>2:7,5>0:4,5>1:3,5>2:1,0>3:8,0>4:3,0>5:2,1>3:6,1>4:4,1>5:5,2>4:3,2>5:7,1>0:2" --bound 1 -o sca.json ``` diff --git a/problemreductions-cli/src/cli.rs b/problemreductions-cli/src/cli.rs index f6b13c1c..3a282ff0 100644 --- a/problemreductions-cli/src/cli.rs +++ b/problemreductions-cli/src/cli.rs @@ -276,6 +276,7 @@ Flags by problem type: RectilinearPictureCompression --matrix (0/1), --k SchedulingWithIndividualDeadlines --n, --num-processors/--m, --deadlines [--precedence-pairs] SequencingToMinimizeMaximumCumulativeCost --costs, --bound [--precedence-pairs] + SequencingToMinimizeWeightedCompletionTime --lengths, --weights [--precedence-pairs] SequencingToMinimizeWeightedTardiness --sizes, --weights, --deadlines, --bound SCS --strings, --bound [--alphabet-size] StringToStringCorrection --source-string, --target-string, --bound [--alphabet-size] @@ -524,7 +525,7 @@ pub struct CreateArgs { /// Deadlines for MinimumTardinessSequencing or 
SchedulingWithIndividualDeadlines (comma-separated, e.g., "5,5,5,3,3") #[arg(long)] pub deadlines: Option, - /// Precedence pairs for MinimumTardinessSequencing or SchedulingWithIndividualDeadlines (e.g., "0>3,1>3,1>4,2>4") + /// Precedence pairs for MinimumTardinessSequencing, SchedulingWithIndividualDeadlines, or SequencingToMinimizeWeightedCompletionTime (e.g., "0>3,1>3,1>4,2>4") #[arg(long)] pub precedence_pairs: Option, /// Resource bounds for ResourceConstrainedScheduling (comma-separated, e.g., "20,15") @@ -735,7 +736,7 @@ mod tests { "Deadlines for MinimumTardinessSequencing or SchedulingWithIndividualDeadlines" )); assert!(help.contains( - "Precedence pairs for MinimumTardinessSequencing or SchedulingWithIndividualDeadlines" + "Precedence pairs for MinimumTardinessSequencing, SchedulingWithIndividualDeadlines, or SequencingToMinimizeWeightedCompletionTime" )); assert!( help.contains( diff --git a/problemreductions-cli/src/commands/create.rs b/problemreductions-cli/src/commands/create.rs index c71bec5c..22d76b0e 100644 --- a/problemreductions-cli/src/commands/create.rs +++ b/problemreductions-cli/src/commands/create.rs @@ -21,9 +21,9 @@ use problemreductions::models::misc::{ MultiprocessorScheduling, PaintShop, PartiallyOrderedKnapsack, QueryArg, RectilinearPictureCompression, ResourceConstrainedScheduling, SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, - SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, - SequencingWithinIntervals, ShortestCommonSupersequence, StringToStringCorrection, SubsetSum, - SumOfSquaresPartition, + SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, + SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, + StringToStringCorrection, SubsetSum, SumOfSquaresPartition, }; use problemreductions::models::BiconnectivityAugmentation; use problemreductions::prelude::*; @@ -110,6 +110,7 @@ fn 
all_data_flags_empty(args: &CreateArgs) -> bool { && args.potential_edges.is_none() && args.budget.is_none() && args.deadlines.is_none() + && args.lengths.is_none() && args.precedence_pairs.is_none() && args.resource_bounds.is_none() && args.resource_requirements.is_none() @@ -2214,6 +2215,70 @@ pub fn create(args: &CreateArgs, out: &OutputConfig) -> Result<()> { ) } + // SequencingToMinimizeWeightedCompletionTime + "SequencingToMinimizeWeightedCompletionTime" => { + let lengths_str = args.lengths.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingToMinimizeWeightedCompletionTime requires --lengths and --weights\n\n\ + Usage: pred create SequencingToMinimizeWeightedCompletionTime --lengths 2,1,3,1,2 --weights 3,5,1,4,2 [--precedence-pairs \"0>2,1>4\"]" + ) + })?; + let weights_str = args.weights.as_deref().ok_or_else(|| { + anyhow::anyhow!( + "SequencingToMinimizeWeightedCompletionTime requires --weights\n\n\ + Usage: pred create SequencingToMinimizeWeightedCompletionTime --lengths 2,1,3,1,2 --weights 3,5,1,4,2" + ) + })?; + let lengths: Vec = util::parse_comma_list(lengths_str)?; + let weights: Vec = util::parse_comma_list(weights_str)?; + anyhow::ensure!( + lengths.len() == weights.len(), + "lengths length ({}) must equal weights length ({})", + lengths.len(), + weights.len() + ); + anyhow::ensure!( + lengths.iter().all(|&length| length > 0), + "task lengths must be positive" + ); + let num_tasks = lengths.len(); + let precedences: Vec<(usize, usize)> = match args.precedence_pairs.as_deref() { + Some(s) if !s.is_empty() => s + .split(',') + .map(|pair| { + let parts: Vec<&str> = pair.trim().split('>').collect(); + anyhow::ensure!( + parts.len() == 2, + "Invalid precedence format '{}', expected 'u>v'", + pair.trim() + ); + Ok(( + parts[0].trim().parse::()?, + parts[1].trim().parse::()?, + )) + }) + .collect::>>()?, + _ => vec![], + }; + for &(pred, succ) in &precedences { + anyhow::ensure!( + pred < num_tasks && succ < num_tasks, + "precedence index out of 
range: ({}, {}) but num_tasks = {}", + pred, + succ, + num_tasks + ); + } + ( + ser(SequencingToMinimizeWeightedCompletionTime::new( + lengths, + weights, + precedences, + ))?, + resolved_variant.clone(), + ) + } + // SequencingToMinimizeWeightedTardiness "SequencingToMinimizeWeightedTardiness" => { let sizes_str = args.sizes.as_deref().ok_or_else(|| { diff --git a/problemreductions-cli/tests/cli_tests.rs b/problemreductions-cli/tests/cli_tests.rs index 82e902a3..a2f6a22f 100644 --- a/problemreductions-cli/tests/cli_tests.rs +++ b/problemreductions-cli/tests/cli_tests.rs @@ -2286,6 +2286,92 @@ fn test_solve_bundle_ilp() { std::fs::remove_file(&bundle_file).ok(); } +#[test] +fn test_solve_direct_ilp_i32_problem() { + let problem_file = std::env::temp_dir().join("pred_test_solve_ilp_i32_problem.json"); + + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "--example", + "SequencingToMinimizeWeightedCompletionTime", + "--to", + "ILP/i32", + "--example-side", + "target", + ]) + .output() + .unwrap(); + assert!( + create_out.status.success(), + "create stderr: {}", + String::from_utf8_lossy(&create_out.stderr) + ); + + let output = pred() + .args(["solve", problem_file.to_str().unwrap()]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("\"problem\": \"ILP\""), "{stdout}"); + assert!(stdout.contains("\"solver\": \"ilp\""), "{stdout}"); + + std::fs::remove_file(&problem_file).ok(); +} + +#[test] +fn test_solve_sequencing_to_minimize_weighted_completion_time_default_solver() { + let problem_file = std::env::temp_dir() + .join("pred_test_solve_sequencing_to_minimize_weighted_completion_time.json"); + + let create_out = pred() + .args([ + "-o", + problem_file.to_str().unwrap(), + "create", + "SequencingToMinimizeWeightedCompletionTime", + "--lengths", + "2,1,3,1,2", + 
"--weights", + "3,5,1,4,2", + "--precedence-pairs", + "0>2,1>4", + ]) + .output() + .unwrap(); + assert!( + create_out.status.success(), + "create stderr: {}", + String::from_utf8_lossy(&create_out.stderr) + ); + + let output = pred() + .args(["solve", problem_file.to_str().unwrap()]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!( + stdout.contains("\"problem\": \"SequencingToMinimizeWeightedCompletionTime\""), + "{stdout}" + ); + assert!(stdout.contains("\"solver\": \"ilp\""), "{stdout}"); + assert!(stdout.contains("\"solution\": ["), "{stdout}"); + + std::fs::remove_file(&problem_file).ok(); +} + #[test] fn test_solve_unknown_solver() { let problem_file = std::env::temp_dir().join("pred_test_solve_unknown.json"); @@ -2836,6 +2922,72 @@ fn test_create_steiner_tree_rejects_duplicate_terminals() { assert!(stderr.contains("terminals must be distinct"), "{stderr}"); } +#[test] +fn test_create_sequencing_to_minimize_weighted_completion_time() { + let output_file = std::env::temp_dir() + .join("pred_test_create_sequencing_to_minimize_weighted_completion_time.json"); + let output = pred() + .args([ + "-o", + output_file.to_str().unwrap(), + "create", + "SequencingToMinimizeWeightedCompletionTime", + "--lengths", + "2,1,3,1,2", + "--weights", + "3,5,1,4,2", + "--precedence-pairs", + "0>2,1>4", + ]) + .output() + .unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let content = std::fs::read_to_string(&output_file).unwrap(); + let json: serde_json::Value = serde_json::from_str(&content).unwrap(); + assert_eq!(json["type"], "SequencingToMinimizeWeightedCompletionTime"); + assert_eq!(json["data"]["lengths"], serde_json::json!([2, 1, 3, 1, 2])); + assert_eq!(json["data"]["weights"], serde_json::json!([3, 5, 1, 4, 2])); + assert_eq!( + json["data"]["precedences"], + 
serde_json::json!([[0, 2], [1, 4]]) + ); + std::fs::remove_file(&output_file).ok(); +} + +#[test] +fn test_create_help_describes_precedence_pairs_generically() { + let output = pred().args(["create", "--help"]).output().unwrap(); + assert!( + output.status.success(), + "stderr: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8(output.stdout).unwrap(); + assert!(stdout.contains("Precedence pairs for MinimumTardinessSequencing, SchedulingWithIndividualDeadlines, or SequencingToMinimizeWeightedCompletionTime")); +} + +#[test] +fn test_create_sequencing_to_minimize_weighted_completion_time_rejects_zero_length() { + let output = pred() + .args([ + "create", + "SequencingToMinimizeWeightedCompletionTime", + "--lengths", + "0,1,3", + "--weights", + "3,5,1", + ]) + .output() + .unwrap(); + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!(stderr.contains("task lengths must be positive"), "{stderr}"); +} + #[test] fn test_create_with_edge_weights() { let output_file = std::env::temp_dir().join("pred_test_create_ew.json"); @@ -4675,6 +4827,47 @@ fn test_inspect_stdin() { ); } +#[test] +fn test_inspect_rejects_zero_length_sequencing_problem_from_stdin() { + let create_out = pred() + .args([ + "create", + "--example", + "SequencingToMinimizeWeightedCompletionTime", + ]) + .output() + .unwrap(); + assert!( + create_out.status.success(), + "stderr: {}", + String::from_utf8_lossy(&create_out.stderr) + ); + + let mut json: serde_json::Value = serde_json::from_slice(&create_out.stdout).unwrap(); + json["data"]["lengths"][0] = serde_json::json!(0); + let invalid_json = serde_json::to_vec(&json).unwrap(); + + use std::io::Write; + let mut child = pred() + .args(["inspect", "-"]) + .stdin(std::process::Stdio::piped()) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + .unwrap(); + child + .stdin + .take() + .unwrap() + .write_all(&invalid_json) + .unwrap(); + 
let result = child.wait_with_output().unwrap(); + + assert!(!result.status.success()); + let stderr = String::from_utf8(result.stderr).unwrap(); + assert!(stderr.contains("task lengths must be positive"), "{stderr}"); +} + #[test] fn test_inspect_json_output() { let problem_file = std::env::temp_dir().join("pred_test_inspect_json_in.json"); diff --git a/src/lib.rs b/src/lib.rs index 9d0c6233..dc941a3e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -65,10 +65,10 @@ pub mod prelude { Knapsack, LongestCommonSubsequence, MinimumTardinessSequencing, MultiprocessorScheduling, PaintShop, Partition, QueryArg, RectilinearPictureCompression, ResourceConstrainedScheduling, SchedulingWithIndividualDeadlines, - SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeWeightedTardiness, - SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, - ShortestCommonSupersequence, StaffScheduling, StringToStringCorrection, SubsetSum, - SumOfSquaresPartition, Term, + SequencingToMinimizeMaximumCumulativeCost, SequencingToMinimizeWeightedCompletionTime, + SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, + SequencingWithinIntervals, ShortestCommonSupersequence, StaffScheduling, + StringToStringCorrection, SubsetSum, SumOfSquaresPartition, Term, }; pub use crate::models::set::{ ComparativeContainment, ConsecutiveSets, ExactCoverBy3Sets, MaximumSetPacking, diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index 487769fb..37f421cb 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -20,6 +20,7 @@ //! - [`ResourceConstrainedScheduling`]: Schedule unit-length tasks on processors with resource constraints //! - [`SchedulingWithIndividualDeadlines`]: Meet per-task deadlines on parallel processors //! - [`SequencingToMinimizeMaximumCumulativeCost`]: Keep every cumulative schedule cost prefix under a bound +//! - [`SequencingToMinimizeWeightedCompletionTime`]: Minimize total weighted completion time //! 
- [`SequencingToMinimizeWeightedTardiness`]: Decide whether a schedule meets a weighted tardiness bound //! - [`SequencingWithReleaseTimesAndDeadlines`]: Single-machine scheduling feasibility //! - [`SequencingWithinIntervals`]: Schedule tasks within time windows @@ -47,6 +48,7 @@ mod rectilinear_picture_compression; pub(crate) mod resource_constrained_scheduling; mod scheduling_with_individual_deadlines; mod sequencing_to_minimize_maximum_cumulative_cost; +mod sequencing_to_minimize_weighted_completion_time; mod sequencing_to_minimize_weighted_tardiness; mod sequencing_with_release_times_and_deadlines; mod sequencing_within_intervals; @@ -75,6 +77,7 @@ pub use rectilinear_picture_compression::RectilinearPictureCompression; pub use resource_constrained_scheduling::ResourceConstrainedScheduling; pub use scheduling_with_individual_deadlines::SchedulingWithIndividualDeadlines; pub use sequencing_to_minimize_maximum_cumulative_cost::SequencingToMinimizeMaximumCumulativeCost; +pub use sequencing_to_minimize_weighted_completion_time::SequencingToMinimizeWeightedCompletionTime; pub use sequencing_to_minimize_weighted_tardiness::SequencingToMinimizeWeightedTardiness; pub use sequencing_with_release_times_and_deadlines::SequencingWithReleaseTimesAndDeadlines; pub use sequencing_within_intervals::SequencingWithinIntervals; @@ -104,6 +107,7 @@ pub(crate) fn canonical_model_example_specs() -> Vec", description: "Processing time l(t) for each task" }, + FieldInfo { name: "weights", type_name: "Vec", description: "Weight w(t) for each task" }, + FieldInfo { name: "precedences", type_name: "Vec<(usize, usize)>", description: "Precedence pairs (predecessor, successor)" }, + ], + } +} + +/// Sequencing to Minimize Weighted Completion Time problem. 
+/// +/// Given tasks with processing times `l(t)`, weights `w(t)`, and precedence +/// constraints, find a single-machine schedule that respects the precedences +/// and minimizes `sum_t w(t) * C(t)`, where `C(t)` is the completion time of +/// task `t`. +/// +/// Configurations use Lehmer code with `dims() = [n, n-1, ..., 1]`. +#[derive(Debug, Clone, Serialize)] +pub struct SequencingToMinimizeWeightedCompletionTime { + lengths: Vec, + weights: Vec, + precedences: Vec<(usize, usize)>, +} + +#[derive(Deserialize)] +struct SequencingToMinimizeWeightedCompletionTimeSerde { + lengths: Vec, + weights: Vec, + precedences: Vec<(usize, usize)>, +} + +impl SequencingToMinimizeWeightedCompletionTime { + fn validate( + lengths: &[u64], + weights: &[u64], + precedences: &[(usize, usize)], + ) -> Result<(), String> { + if lengths.len() != weights.len() { + return Err("lengths length must equal weights length".to_string()); + } + if lengths.contains(&0) { + return Err("task lengths must be positive".to_string()); + } + + let num_tasks = lengths.len(); + for &(pred, succ) in precedences { + if pred >= num_tasks { + return Err(format!( + "predecessor index {} out of range (num_tasks = {})", + pred, num_tasks + )); + } + if succ >= num_tasks { + return Err(format!( + "successor index {} out of range (num_tasks = {})", + succ, num_tasks + )); + } + } + + Ok(()) + } + + /// Create a new sequencing instance. + /// + /// # Panics + /// + /// Panics if `lengths.len() != weights.len()` or if any precedence endpoint + /// is out of range. + pub fn new(lengths: Vec, weights: Vec, precedences: Vec<(usize, usize)>) -> Self { + Self::validate(&lengths, &weights, &precedences).unwrap_or_else(|err| panic!("{err}")); + + Self { + lengths, + weights, + precedences, + } + } + + /// Returns the number of tasks. + pub fn num_tasks(&self) -> usize { + self.lengths.len() + } + + /// Returns the processing times. 
+ pub fn lengths(&self) -> &[u64] { + &self.lengths + } + + /// Returns the task weights. + pub fn weights(&self) -> &[u64] { + &self.weights + } + + /// Returns the precedence constraints. + pub fn precedences(&self) -> &[(usize, usize)] { + &self.precedences + } + + /// Returns the number of precedence constraints. + pub fn num_precedences(&self) -> usize { + self.precedences.len() + } + + /// Returns the sum of all processing times. + pub fn total_processing_time(&self) -> u64 { + self.lengths + .iter() + .try_fold(0u64, |acc, &length| acc.checked_add(length)) + .expect("total processing time overflowed u64") + } + + fn decode_schedule(&self, config: &[usize]) -> Option> { + let n = self.num_tasks(); + if config.len() != n { + return None; + } + + let mut available: Vec = (0..n).collect(); + let mut schedule = Vec::with_capacity(n); + for &digit in config { + if digit >= available.len() { + return None; + } + schedule.push(available.remove(digit)); + } + Some(schedule) + } + + fn weighted_completion_time(&self, schedule: &[usize]) -> SolutionSize { + let n = self.num_tasks(); + let mut positions = vec![0usize; n]; + let mut completion_times = vec![0u64; n]; + let mut elapsed = 0u64; + + for (position, &task) in schedule.iter().enumerate() { + positions[task] = position; + elapsed = elapsed + .checked_add(self.lengths[task]) + .expect("total processing time overflowed u64"); + completion_times[task] = elapsed; + } + + for &(pred, succ) in &self.precedences { + if positions[pred] >= positions[succ] { + return SolutionSize::Invalid; + } + } + + let total = completion_times + .iter() + .enumerate() + .try_fold(0u64, |acc, (task, &completion)| -> Option { + let weighted_completion = completion.checked_mul(self.weights[task])?; + acc.checked_add(weighted_completion) + }) + .expect("weighted completion time overflowed u64"); + SolutionSize::Valid(total) + } +} + +impl TryFrom + for SequencingToMinimizeWeightedCompletionTime +{ + type Error = String; + + fn try_from( + 
value: SequencingToMinimizeWeightedCompletionTimeSerde, + ) -> Result { + Self::validate(&value.lengths, &value.weights, &value.precedences)?; + Ok(Self { + lengths: value.lengths, + weights: value.weights, + precedences: value.precedences, + }) + } +} + +impl<'de> Deserialize<'de> for SequencingToMinimizeWeightedCompletionTime { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let value = SequencingToMinimizeWeightedCompletionTimeSerde::deserialize(deserializer)?; + Self::try_from(value).map_err(serde::de::Error::custom) + } +} + +impl Problem for SequencingToMinimizeWeightedCompletionTime { + const NAME: &'static str = "SequencingToMinimizeWeightedCompletionTime"; + type Metric = SolutionSize; + + fn variant() -> Vec<(&'static str, &'static str)> { + crate::variant_params![] + } + + fn dims(&self) -> Vec { + let n = self.num_tasks(); + (0..n).rev().map(|i| i + 1).collect() + } + + fn evaluate(&self, config: &[usize]) -> SolutionSize { + let Some(schedule) = self.decode_schedule(config) else { + return SolutionSize::Invalid; + }; + self.weighted_completion_time(&schedule) + } +} + +impl OptimizationProblem for SequencingToMinimizeWeightedCompletionTime { + type Value = u64; + + fn direction(&self) -> Direction { + Direction::Minimize + } +} + +crate::declare_variants! 
{ + default opt SequencingToMinimizeWeightedCompletionTime => "factorial(num_tasks)", +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_model_example_specs() -> Vec { + vec![crate::example_db::specs::ModelExampleSpec { + id: "sequencing_to_minimize_weighted_completion_time", + instance: Box::new(SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3, 1, 2], + vec![3, 5, 1, 4, 2], + vec![(0, 2), (1, 4)], + )), + optimal_config: vec![1, 2, 0, 1, 0], + optimal_value: serde_json::json!({"Valid": 46}), + }] +} + +#[cfg(test)] +#[path = "../../unit_tests/models/misc/sequencing_to_minimize_weighted_completion_time.rs"] +mod tests; diff --git a/src/models/mod.rs b/src/models/mod.rs index 9017b8e8..5e386165 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -33,9 +33,9 @@ pub use misc::{ MultiprocessorScheduling, PaintShop, Partition, PrecedenceConstrainedScheduling, QueryArg, RectilinearPictureCompression, ResourceConstrainedScheduling, SchedulingWithIndividualDeadlines, SequencingToMinimizeMaximumCumulativeCost, - SequencingToMinimizeWeightedTardiness, SequencingWithReleaseTimesAndDeadlines, - SequencingWithinIntervals, ShortestCommonSupersequence, StaffScheduling, - StringToStringCorrection, SubsetSum, SumOfSquaresPartition, Term, + SequencingToMinimizeWeightedCompletionTime, SequencingToMinimizeWeightedTardiness, + SequencingWithReleaseTimesAndDeadlines, SequencingWithinIntervals, ShortestCommonSupersequence, + StaffScheduling, StringToStringCorrection, SubsetSum, SumOfSquaresPartition, Term, }; pub use set::{ ComparativeContainment, ConsecutiveSets, ExactCoverBy3Sets, MaximumSetPacking, diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 77f3d894..68b87be7 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -75,6 +75,8 @@ pub(crate) mod minimumsetcovering_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod qubo_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod sequencingtominimizeweightedcompletiontime_ilp; +#[cfg(feature = 
"ilp-solver")] pub(crate) mod travelingsalesman_ilp; pub use graph::{ @@ -125,6 +127,8 @@ pub(crate) fn canonical_rule_example_specs() -> Vec`. +//! For each unordered pair `{i, j}`, a pair of big-M constraints forces one +//! task to finish before the other starts. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::SequencingToMinimizeWeightedCompletionTime; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionSTMWCTToILP { + target: ILP, + num_tasks: usize, +} + +impl ReductionSTMWCTToILP { + #[cfg(test)] + pub(crate) fn completion_var(&self, task: usize) -> usize { + task + } + + #[cfg(test)] + pub(crate) fn order_var(&self, i: usize, j: usize) -> usize { + assert!(i < j, "order_var expects i < j"); + self.num_tasks + i * (2 * self.num_tasks - i - 1) / 2 + (j - i - 1) + } + + fn encode_schedule_as_lehmer(schedule: &[usize]) -> Vec { + let mut available: Vec = (0..schedule.len()).collect(); + let mut config = Vec::with_capacity(schedule.len()); + for &task in schedule { + let digit = available + .iter() + .position(|&candidate| candidate == task) + .expect("schedule must be a permutation"); + config.push(digit); + available.remove(digit); + } + config + } +} + +impl ReductionResult for ReductionSTMWCTToILP { + type Source = SequencingToMinimizeWeightedCompletionTime; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let mut schedule: Vec = (0..self.num_tasks).collect(); + schedule.sort_by_key(|&task| (target_solution.get(task).copied().unwrap_or(0), task)); + Self::encode_schedule_as_lehmer(&schedule) + } +} + +#[reduction(overhead = { + num_vars = "num_tasks + num_tasks * (num_tasks - 1) / 2", + num_constraints = "2 * num_tasks + 3 * num_tasks * (num_tasks - 1) / 2 + num_precedences", +})] +impl ReduceTo> for 
SequencingToMinimizeWeightedCompletionTime { + type Result = ReductionSTMWCTToILP; + + fn reduce_to(&self) -> Self::Result { + let num_tasks = self.num_tasks(); + let max_ilp_value = i32::MAX as u64; + let max_exact_f64_integer = 1u64 << 53; + assert!( + self.lengths().iter().all(|&length| length <= max_ilp_value), + "task lengths must fit in ILP variable bounds" + ); + + let total_processing_time_u64 = self.total_processing_time(); + assert!( + total_processing_time_u64 <= max_ilp_value, + "total processing time must fit in ILP variable bounds" + ); + + let total_weight = self + .weights() + .iter() + .try_fold(0u64, |acc, &weight| acc.checked_add(weight)) + .expect("weighted completion objective must fit exactly in f64"); + assert!( + total_processing_time_u64 == 0 + || total_weight <= max_exact_f64_integer / total_processing_time_u64, + "weighted completion objective must fit exactly in f64" + ); + + let total_processing_time = total_processing_time_u64 as f64; + let num_order_vars = num_tasks * (num_tasks.saturating_sub(1)) / 2; + let num_vars = num_tasks + num_order_vars; + + let order_var = |i: usize, j: usize| -> usize { + debug_assert!(i < j); + num_tasks + i * (2 * num_tasks - i - 1) / 2 + (j - i - 1) + }; + + let mut constraints = Vec::new(); + + for (task, &length) in self.lengths().iter().enumerate() { + constraints.push(LinearConstraint::ge(vec![(task, 1.0)], length as f64)); + constraints.push(LinearConstraint::le( + vec![(task, 1.0)], + total_processing_time, + )); + } + + for i in 0..num_tasks { + for j in (i + 1)..num_tasks { + let order = order_var(i, j); + let completion_i = i; + let completion_j = j; + let length_i = self.lengths()[i] as f64; + let length_j = self.lengths()[j] as f64; + + constraints.push(LinearConstraint::le(vec![(order, 1.0)], 1.0)); + + // If y_{i,j} = 1, then task i is before task j: C_j - C_i >= l_j. 
+ constraints.push(LinearConstraint::ge( + vec![ + (completion_j, 1.0), + (completion_i, -1.0), + (order, -total_processing_time), + ], + length_j - total_processing_time, + )); + + // If y_{i,j} = 0, then task j is before task i: C_i - C_j >= l_i. + constraints.push(LinearConstraint::ge( + vec![ + (completion_i, 1.0), + (completion_j, -1.0), + (order, total_processing_time), + ], + length_i, + )); + } + } + + for &(pred, succ) in self.precedences() { + constraints.push(LinearConstraint::ge( + vec![(succ, 1.0), (pred, -1.0)], + self.lengths()[succ] as f64, + )); + } + + let objective = self + .weights() + .iter() + .enumerate() + .map(|(task, &weight)| (task, weight as f64)) + .collect(); + + Self::Result { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_tasks, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "sequencingtominimizeweightedcompletiontime_to_ilp", + build: || { + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1], vec![3, 5], vec![]), + SolutionPair { + source_config: vec![1, 0], + target_config: vec![3, 1, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/sequencingtominimizeweightedcompletiontime_ilp.rs"] +mod tests; diff --git a/src/unit_tests/models/misc/sequencing_to_minimize_weighted_completion_time.rs b/src/unit_tests/models/misc/sequencing_to_minimize_weighted_completion_time.rs new file mode 100644 index 00000000..08aeab2c --- /dev/null +++ b/src/unit_tests/models/misc/sequencing_to_minimize_weighted_completion_time.rs @@ -0,0 +1,193 @@ +use super::*; +use crate::solvers::{BruteForce, Solver}; +use crate::traits::{OptimizationProblem, Problem}; +use crate::types::{Direction, SolutionSize}; + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_basic() { + 
let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3, 1, 2], + vec![3, 5, 1, 4, 2], + vec![(0, 2), (1, 4)], + ); + + assert_eq!(problem.num_tasks(), 5); + assert_eq!(problem.lengths(), &[2, 1, 3, 1, 2]); + assert_eq!(problem.weights(), &[3, 5, 1, 4, 2]); + assert_eq!(problem.precedences(), &[(0, 2), (1, 4)]); + assert_eq!(problem.num_precedences(), 2); + assert_eq!(problem.total_processing_time(), 9); + assert_eq!(problem.dims(), vec![5, 4, 3, 2, 1]); + assert_eq!(problem.direction(), Direction::Minimize); + assert_eq!( + <SequencingToMinimizeWeightedCompletionTime as Problem>::NAME, + "SequencingToMinimizeWeightedCompletionTime" + ); + assert_eq!( + <SequencingToMinimizeWeightedCompletionTime as Problem>::variant(), + vec![] + ); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_evaluate_issue_example() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3, 1, 2], + vec![3, 5, 1, 4, 2], + vec![(0, 2), (1, 4)], + ); + + // Lehmer [1,2,0,1,0] decodes to schedule [1,3,0,4,2]. + // Completion times are [4,1,9,2,6], so the objective is + // 3*4 + 5*1 + 1*9 + 4*2 + 2*6 = 46.
+ assert_eq!(problem.evaluate(&[1, 2, 0, 1, 0]), SolutionSize::Valid(46)); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_evaluate_invalid_lehmer() { + let problem = + SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1, 3], vec![3, 5, 1], vec![]); + + assert_eq!(problem.evaluate(&[0, 2, 0]), SolutionSize::Invalid); + assert_eq!(problem.evaluate(&[0, 1, 5]), SolutionSize::Invalid); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_evaluate_wrong_length() { + let problem = + SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1, 3], vec![3, 5, 1], vec![]); + + assert_eq!(problem.evaluate(&[0, 1]), SolutionSize::Invalid); + assert_eq!(problem.evaluate(&[0, 1, 2, 3]), SolutionSize::Invalid); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_evaluate_precedence_violation() { + let problem = + SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1, 3], vec![3, 5, 1], vec![(0, 1)]); + + assert_eq!(problem.evaluate(&[0, 0, 0]), SolutionSize::Valid(27)); + assert_eq!(problem.evaluate(&[1, 0, 0]), SolutionSize::Invalid); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_brute_force() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3, 1, 2], + vec![3, 5, 1, 4, 2], + vec![(0, 2), (1, 4)], + ); + let solver = BruteForce::new(); + let solution = solver.find_best(&problem).expect("should find a solution"); + + assert_eq!(solution, vec![1, 2, 0, 1, 0]); + assert_eq!(problem.evaluate(&solution), SolutionSize::Valid(46)); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_serialization() { + let problem = + SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1, 3], vec![3, 5, 1], vec![(0, 2)]); + let json = serde_json::to_value(&problem).unwrap(); + let restored: SequencingToMinimizeWeightedCompletionTime = + serde_json::from_value(json).unwrap(); + + assert_eq!(restored.lengths(), problem.lengths()); + 
assert_eq!(restored.weights(), problem.weights()); + assert_eq!(restored.precedences(), problem.precedences()); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_deserialization_rejects_zero_length_task() { + let err = + serde_json::from_value::<SequencingToMinimizeWeightedCompletionTime>(serde_json::json!({ + "lengths": [0, 1, 3], + "weights": [3, 5, 1], + "precedences": [], + })) + .unwrap_err(); + + assert!(err.to_string().contains("task lengths must be positive")); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_empty() { + let problem = SequencingToMinimizeWeightedCompletionTime::new(vec![], vec![], vec![]); + + assert_eq!(problem.num_tasks(), 0); + assert_eq!(problem.dims(), Vec::<usize>::new()); + assert_eq!(problem.evaluate(&[]), SolutionSize::Valid(0)); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_single_task() { + let problem = SequencingToMinimizeWeightedCompletionTime::new(vec![3], vec![2], vec![]); + + assert_eq!(problem.dims(), vec![1]); + assert_eq!(problem.evaluate(&[0]), SolutionSize::Valid(6)); +} + +#[test] +#[should_panic(expected = "lengths length must equal weights length")] +fn test_sequencing_to_minimize_weighted_completion_time_mismatched_lengths_and_weights() { + SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1], vec![3], vec![]); +} + +#[test] +#[should_panic(expected = "successor index 5 out of range")] +fn test_sequencing_to_minimize_weighted_completion_time_invalid_precedence() { + SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1, 3], vec![3, 5, 1], vec![(0, 5)]); +} + +#[test] +#[should_panic(expected = "task lengths must be positive")] +fn test_sequencing_to_minimize_weighted_completion_time_zero_length_task() { + SequencingToMinimizeWeightedCompletionTime::new(vec![0, 1, 3], vec![3, 5, 1], vec![]); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_cyclic_precedences() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3], + vec![3, 5, 1], +
vec![(0, 1), (1, 2), (2, 0)], + ); + let solver = BruteForce::new(); + + assert!(solver.find_best(&problem).is_none()); +} + +#[test] +fn test_sequencing_to_minimize_weighted_completion_time_paper_example() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3, 1, 2], + vec![3, 5, 1, 4, 2], + vec![(0, 2), (1, 4)], + ); + let expected = vec![1, 2, 0, 1, 0]; + + assert_eq!(problem.evaluate(&expected), SolutionSize::Valid(46)); + + let solver = BruteForce::new(); + let solutions = solver.find_all_best(&problem); + assert_eq!(solutions, vec![expected]); +} + +#[test] +#[should_panic(expected = "weighted completion time overflowed u64")] +fn test_sequencing_to_minimize_weighted_completion_time_weighted_sum_overflow() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![1, 1], + vec![u64::MAX, u64::MAX], + vec![], + ); + let _ = problem.evaluate(&[0, 0]); +} + +#[test] +#[should_panic(expected = "total processing time overflowed u64")] +fn test_sequencing_to_minimize_weighted_completion_time_total_processing_time_overflow() { + let problem = + SequencingToMinimizeWeightedCompletionTime::new(vec![u64::MAX, 1], vec![1, 1], vec![]); + let _ = problem.total_processing_time(); +} diff --git a/src/unit_tests/rules/sequencingtominimizeweightedcompletiontime_ilp.rs b/src/unit_tests/rules/sequencingtominimizeweightedcompletiontime_ilp.rs new file mode 100644 index 00000000..f62686b5 --- /dev/null +++ b/src/unit_tests/rules/sequencingtominimizeweightedcompletiontime_ilp.rs @@ -0,0 +1,159 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::models::misc::SequencingToMinimizeWeightedCompletionTime; +use crate::solvers::{BruteForce, ILPSolver, Solver}; +use crate::traits::Problem; +use crate::types::SolutionSize; + +#[test] +fn test_reduction_creates_expected_ilp_shape() { + let problem = SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1], vec![3, 5], vec![]); + let reduction: ReductionSTMWCTToILP 
= ReduceTo::<ILP<f64>>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 2 completion variables + 1 pair-order variable. + assert_eq!(ilp.num_vars, 3); + + // 2 lower bounds + 2 upper bounds + 1 binary upper bound + 2 disjunctive constraints. + assert_eq!(ilp.constraints.len(), 7); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + + // Objective is w_0 * C_0 + w_1 * C_1. + assert_eq!(ilp.objective, vec![(0, 3.0), (1, 5.0)]); +} + +#[test] +fn test_variable_layout_helpers() { + let problem = + SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1, 3], vec![3, 5, 1], vec![(0, 2)]); + let reduction: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); + + assert_eq!(reduction.completion_var(0), 0); + assert_eq!(reduction.completion_var(2), 2); + assert_eq!(reduction.order_var(0, 1), 3); + assert_eq!(reduction.order_var(0, 2), 4); + assert_eq!(reduction.order_var(1, 2), 5); +} + +#[test] +fn test_extract_solution_encodes_schedule_as_lehmer_code() { + let problem = SequencingToMinimizeWeightedCompletionTime::new(vec![2, 1], vec![3, 5], vec![]); + let reduction: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); + + // Completion times C0 = 3, C1 = 1 imply schedule [1, 0]. + // y_{0,1} = 0 means task 1 before task 0.
+ let extracted = reduction.extract_solution(&[3, 1, 0]); + assert_eq!(extracted, vec![1, 0]); + assert_eq!(problem.evaluate(&extracted), SolutionSize::Valid(14)); +} + +#[test] +fn test_issue_example_closed_loop() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3, 1, 2], + vec![3, 5, 1, 4, 2], + vec![(0, 2), (1, 4)], + ); + let reduction: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(extracted, vec![1, 2, 0, 1, 0]); + assert_eq!(problem.evaluate(&extracted), SolutionSize::Valid(46)); +} + +#[test] +fn test_ilp_matches_bruteforce_optimum() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3, 1, 2], + vec![3, 5, 1, 4, 2], + vec![(0, 2), (1, 4)], + ); + + let brute_force = BruteForce::new(); + let brute_force_solution = brute_force + .find_best(&problem) + .expect("brute force should find a schedule"); + let brute_force_metric = problem.evaluate(&brute_force_solution); + + let reduction: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); + let ilp = reduction.target_problem(); + let ilp_solution = ILPSolver::new().solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_metric = problem.evaluate(&extracted); + + assert_eq!(ilp_metric, brute_force_metric); +} + +#[test] +fn test_cyclic_precedence_instance_is_infeasible() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![1, 1], + vec![1, 1], + vec![(0, 1), (1, 0)], + ); + let reduction: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert!( + ILPSolver::new().solve(ilp).is_none(), + "cyclic precedences should make the ILP infeasible" + ); +} + +#[test] +#[should_panic(expected = "task lengths must fit in ILP
variable bounds")] +fn test_reduction_panics_when_a_task_length_exceeds_i32_domain() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![(i32::MAX as u64) + 1], + vec![1], + vec![], + ); + let _: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); +} + +#[test] +#[should_panic(expected = "total processing time must fit in ILP variable bounds")] +fn test_reduction_panics_when_total_processing_time_exceeds_i32_domain() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![i32::MAX as u64, 1], + vec![1, 1], + vec![], + ); + let _: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); +} + +#[test] +#[should_panic(expected = "weighted completion objective must fit exactly in f64")] +fn test_reduction_panics_when_a_weight_exceeds_exact_f64_integer_range() { + let problem = + SequencingToMinimizeWeightedCompletionTime::new(vec![1], vec![(1u64 << 53) + 1], vec![]); + let _: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); +} + +#[test] +#[should_panic(expected = "weighted completion objective must fit exactly in f64")] +fn test_reduction_panics_when_weighted_completion_objective_exceeds_exact_f64_range() { + let problem = + SequencingToMinimizeWeightedCompletionTime::new(vec![1, 1], vec![1 << 52, 1 << 52], vec![]); + let _: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); +} + +#[test] +fn test_solve_reduced_matches_source_optimum() { + let problem = SequencingToMinimizeWeightedCompletionTime::new( + vec![2, 1, 3, 1, 2], + vec![3, 5, 1, 4, 2], + vec![(0, 2), (1, 4)], + ); + let reduction: ReductionSTMWCTToILP = ReduceTo::<ILP<f64>>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let source_solution = reduction.extract_solution(&ilp_solution); + + assert_eq!(source_solution, vec![1, 2, 0, 1, 0]); + assert_eq!(problem.evaluate(&source_solution), SolutionSize::Valid(46)); +}