From 45969393637cc7d65206d9d2fa0fc656a03cdb95 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 24 Mar 2026 15:42:12 +0800 Subject: [PATCH 1/6] docs: add Tier 3 ILP reduction paper entries, design spec, and implementation plan - 39 new reduction-rule entries in reductions.typ with standardized multiline ILP equation blocks (variables, constraints, objective) - Expanded 9 complex entries with full variable indexing, big-M values, and flow schemes (MixedChinesePostman, StackerCrane, AcyclicPartition, BiconnectivityAugmentation, BoundedComponentSpanningForest, StrongConnectivityAugmentation, ConsecutiveOnesMatrixAugmentation, StringToStringCorrection, RootedTreeStorageAssignment) - Added equation blocks to all existing Tier 1/2 ILP entries - Fixed ShortestWeightConstrainedPath MTZ constraint inversion - Fixed 10 undefined/mismatched symbols across entries - Standardized ILP problem-def with multiline equation - Design spec: docs/superpowers/specs/2026-03-24-tier3-ilp-reductions-design.md - Implementation plan: docs/superpowers/plans/2026-03-24-tier3-ilp-reductions.md 2 problems deferred: PartialFeedbackEdgeSet (no poly-size ILP for L --- docs/paper/reductions.typ | 1517 ++++++++++++++++- .../plans/2026-03-24-tier3-ilp-reductions.md | 547 ++++++ .../2026-03-24-tier3-ilp-reductions-design.md | 220 +++ 3 files changed, 2244 insertions(+), 40 deletions(-) create mode 100644 docs/superpowers/plans/2026-03-24-tier3-ilp-reductions.md create mode 100644 docs/superpowers/specs/2026-03-24-tier3-ilp-reductions-design.md diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index ed7f490b..5d6f7602 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -3164,7 +3164,12 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], } [ #problem-def("ILP")[ - Given $n$ variables $bold(x)$ over a domain $cal(D)$ (binary $cal(D) = {0,1}$ or integer $cal(D) = ZZ_(>=0)$), constraint matrix $A in RR^(m times n)$, bounds $bold(b) in 
RR^m$, and objective $bold(c) in RR^n$, find $bold(x) in cal(D)^n$ minimizing $bold(c)^top bold(x)$ subject to $A bold(x) <= bold(b)$. + Given $n$ variables $bold(x)$ over a domain $cal(D)$ (binary $cal(D) = {0,1}$ or integer $cal(D) = ZZ_(>=0)$), constraint matrix $A in RR^(m times n)$, bounds $bold(b) in RR^m$, and objective $bold(c) in RR^n$, solve + $ + min quad & bold(c)^top bold(x) \ + "subject to" quad & A bold(x) <= bold(b) \ + & bold(x) in cal(D)^n + $. ][ Integer Linear Programming is a universal modeling framework: virtually every NP-hard combinatorial optimization problem admits an ILP formulation. Relaxing integrality to $bold(x) in RR^n$ yields a linear program solvable in polynomial time, forming the basis of branch-and-bound solvers. When the number of integer variables $n$ is fixed, ILP is solvable in polynomial time by Lenstra's algorithm @lenstra1983 using the geometry of numbers, making it fixed-parameter tractable in $n$. The best known general algorithm achieves $O^*(n^n)$ via an FPT algorithm based on lattice techniques @dadush2012. @@ -4631,6 +4636,18 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], _Objective:_ Minimize $0$ (feasibility problem). + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(x in D_a) y_(v,a,x) = 1 quad forall v in V, a in A \ + & y_(v,a,x) = 1 quad forall (v, a, x) in K \ + & z_(t,v,x,x') <= y_(v,a,x) quad forall t in cal(T), v in V, (x, x') in D_a times D_b \ + & z_(t,v,x,x') <= y_(v,b,x') quad forall t in cal(T), v in V, (x, x') in D_a times D_b \ + & z_(t,v,x,x') >= y_(v,a,x) + y_(v,b,x') - 1 quad forall t in cal(T), v in V, (x, x') in D_a times D_b \ + & sum_(v in V) z_(t,v,x,x') = f_t(x, x') quad forall t in cal(T), (x, x') in D_a times D_b \ + & y_(v,a,x), z_(t,v,x,x') in {0, 1} + $. 
+ _Correctness._ ($arrow.r.double$) A consistent assignment defines one-hot indicators and their products; all constraints hold by construction, and the frequency equalities match the published counts. ($arrow.l.double$) Any feasible binary solution assigns exactly one value per object-attribute (one-hot), respects known values, and the McCormick constraints force $z_(t,v,x,x') = y_(v,a,x) dot y_(v,b,x')$ for binary variables, so the frequency equalities certify consistency. _Solution extraction._ For each object $v$ and attribute $a$, find $x$ with $y_(v,a,x) = 1$; assign value $x$ to $(v, a)$. @@ -7143,6 +7160,15 @@ where $P$ is a penalty weight large enough that any constraint violation costs m _ILP formulation._ Minimize $sum_i Q_(i i) x_i + sum_(i < j) Q_(i j) y_(i j)$ subject to the McCormick constraints and $x_i, y_(i j) in {0, 1}$. + The ILP is: + $ + min quad & sum_i Q_(i i) x_i + sum_(i < j) Q_(i j) y_(i j) \ + "subject to" quad & y_(i j) <= x_i quad forall i < j, Q_(i j) != 0 \ + & y_(i j) <= x_j quad forall i < j, Q_(i j) != 0 \ + & y_(i j) >= x_i + x_j - 1 quad forall i < j, Q_(i j) != 0 \ + & x_i, y_(i j) in {0, 1} + $. + _Correctness._ ($arrow.r.double$) For binary $x_i, x_j$, the three McCormick inequalities are tight: $y_(i j) = x_i x_j$ is the unique feasible value. Hence the ILP objective equals $bold(x)^top Q bold(x)$, and any ILP minimizer is a QUBO minimizer. ($arrow.l.double$) Given a QUBO minimizer $bold(x)^*$, setting $y_(i j) = x_i^* x_j^*$ satisfies all constraints and achieves the same objective value. _Solution extraction._ Return the first $n$ variables (discard auxiliary $y_(i j)$). @@ -7177,6 +7203,21 @@ where $P$ is a penalty weight large enough that any constraint violation costs m _Objective._ Minimize $0$ (feasibility problem): any feasible solution satisfies the circuit. 
+ The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & c + a = 1 quad "for each NOT gate" \ + & c <= a_i quad forall i quad "for each AND gate" \ + & c >= sum_i a_i - (k - 1) quad "for each AND gate" \ + & c >= a_i quad forall i quad "for each OR gate" \ + & c <= sum_i a_i quad "for each OR gate" \ + & c <= a + b quad "for each XOR gate" \ + & c >= a - b quad "for each XOR gate" \ + & c >= b - a quad "for each XOR gate" \ + & c <= 2 - a - b quad "for each XOR gate" \ + & "all gate and input variables are binary" + $. + _Correctness._ ($arrow.r.double$) Each gate encoding is the convex hull of the gate's truth table rows (viewed as binary vectors), so a satisfying circuit assignment satisfies all constraints. ($arrow.l.double$) Any binary feasible solution respects every gate's input-output relation, and since gates are composed in topological order, the full circuit evaluates to true. _Solution extraction._ Return values of the named circuit variables. @@ -7477,6 +7518,14 @@ where $P$ is a penalty weight large enough that any constraint violation costs m _Objective:_ Feasibility problem (minimize 0). + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(c=1)^k x_(v,c) = 1 quad forall v in V \ + & x_(u,c) + x_(v,c) <= 1 quad forall (u, v) in E, c in {1, dots, k} \ + & x_(v,c) in {0, 1} + $. + _Correctness._ ($arrow.r.double$) A valid $k$-coloring assigns exactly one color per vertex with different colors on adjacent vertices; setting $x_(v,c) = 1$ for the assigned color satisfies all constraints. ($arrow.l.double$) Any feasible ILP solution has exactly one $x_(v,c) = 1$ per vertex; this defines a coloring, and constraint (2) ensures adjacent vertices differ. _Solution extraction._ For each vertex $v$, find $c$ with $x_(v,c) = 1$; assign color $c$ to $v$. @@ -7498,6 +7547,17 @@ where $P$ is a penalty weight large enough that any constraint violation costs m _No overflow:_ $c_(m+n-1) = 0$. 
+ The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & z_(i j) <= p_i quad forall i, j \ + & z_(i j) <= q_j quad forall i, j \ + & z_(i j) >= p_i + q_j - 1 quad forall i, j \ + & sum_(i+j=k) z_(i j) + c_(k-1) = N_k + 2 c_k quad forall k in {0, dots, m + n - 1} \ + & c_(m+n-1) = 0 \ + & p_i, q_j, z_(i j) in {0, 1}, c_k in ZZ_(>=0), c_(-1) = 0 + $. + _Correctness._ The McCormick constraints enforce $z_(i j) = p_i dot q_j$ for binary variables. The bit equations encode $p times q = N$ via carry propagation, matching array multiplier semantics. _Solution extraction._ Read $p = sum_i p_i 2^i$ and $q = sum_j q_j 2^j$ from the binary variables. @@ -7510,7 +7570,12 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MaximumSetPacking", "ILP")[ Each set is either selected or not, and every universe element may belong to at most one selected set -- an element-based constraint that is directly linear in binary indicator variables. ][ - _Construction._ Variables: $x_i in {0, 1}$ for each set $S_i in cal(S)$. Constraints: $sum_(S_i in.rev e) x_i <= 1$ for each element $e in U$. Objective: maximize $sum_i w_i x_i$. + _Construction._ Variables: $x_i in {0, 1}$ for each set $S_i in cal(S)$. The ILP is: + $ + max quad & sum_i w_i x_i \ + "subject to" quad & sum_(S_i in.rev e) x_i <= 1 quad forall e in U \ + & x_i in {0, 1} quad forall i + $. _Correctness._ ($arrow.r.double$) A valid packing chooses pairwise disjoint sets, so each element is covered at most once. ($arrow.l.double$) Any feasible binary solution covers each element at most once, hence the chosen sets are pairwise disjoint; the objective maximizes total weight.
@@ -7520,7 +7585,12 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MaximumMatching", "ILP")[ Each edge is either selected or not, and each vertex may be incident to at most one selected edge -- a degree-bound constraint that is directly linear in binary edge indicators. ][ - _Construction._ Variables: $x_e in {0, 1}$ for each $e in E$. Constraints: $sum_(e in.rev v) x_e <= 1$ for each $v in V$. Objective: maximize $sum_e w_e x_e$. + _Construction._ Variables: $x_e in {0, 1}$ for each $e in E$. The ILP is: + $ + max quad & sum_e w_e x_e \ + "subject to" quad & sum_(e in.rev v) x_e <= 1 quad forall v in V \ + & x_e in {0, 1} quad forall e in E + $. _Correctness._ ($arrow.r.double$) A matching has at most one edge per vertex, so all degree constraints hold. ($arrow.l.double$) Any feasible solution is a matching by construction; the objective maximizes total weight. @@ -7530,7 +7600,12 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MinimumSetCovering", "ILP")[ Every universe element must be covered by at least one selected set -- a lower-bound constraint on the sum of indicators for sets containing that element, which is directly linear. ][ - _Construction._ Variables: $x_i in {0, 1}$ for each $S_i in cal(S)$. Constraints: $sum_(S_i in.rev u) x_i >= 1$ for each $u in U$. Objective: minimize $sum_i w_i x_i$. + _Construction._ Variables: $x_i in {0, 1}$ for each $S_i in cal(S)$. The ILP is: + $ + min quad & sum_i w_i x_i \ + "subject to" quad & sum_(S_i in.rev u) x_i >= 1 quad forall u in U \ + & x_i in {0, 1} quad forall i + $. _Correctness._ ($arrow.r.double$) A set cover includes at least one set containing each element, satisfying all constraints. ($arrow.l.double$) Any feasible solution covers every element; the objective minimizes total weight. 
@@ -7540,7 +7615,12 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MinimumDominatingSet", "ILP")[ Every vertex must be dominated -- either selected itself or adjacent to a selected vertex -- which is a lower-bound constraint on the sum of indicators over its closed neighborhood. ][ - _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. Constraints: $x_v + sum_(u in N(v)) x_u >= 1$ for each $v in V$ (each vertex dominated). Objective: minimize $sum_v w_v x_v$. + _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. The ILP is: + $ + min quad & sum_v w_v x_v \ + "subject to" quad & x_v + sum_(u in N(v)) x_u >= 1 quad forall v in V \ + & x_v in {0, 1} quad forall v in V + $. _Correctness._ ($arrow.r.double$) A dominating set includes a vertex or one of its neighbors for every vertex, satisfying all constraints. ($arrow.l.double$) Any feasible solution dominates every vertex; the objective minimizes total weight. @@ -7558,6 +7638,13 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective:_ Minimize $sum_v w_v x_v$. + The ILP is: + $ + min quad & sum_v w_v x_v \ + "subject to" quad & o_v - o_u >= 1 - n (x_u + x_v) quad forall (u -> v) in A \ + & x_v in {0, 1}, o_v in {0, dots, n - 1} quad forall v in V + $. + _Correctness._ ($arrow.r.double$) If $S$ is a feedback vertex set, then $G[V backslash S]$ is a DAG with a topological ordering. Set $x_v = 1$ for $v in S$, $o_v$ to the topological position for kept vertices, and $o_v = 0$ for removed vertices. All constraints are satisfied. ($arrow.l.double$) If the ILP is feasible with all arc constraints satisfied, no directed cycle can exist among kept vertices: a cycle $v_1 -> dots -> v_k -> v_1$ would require $o_(v_1) < o_(v_2) < dots < o_(v_k) < o_(v_1)$, a contradiction. _Solution extraction._ $S = {v : x_v = 1}$. 
@@ -7566,7 +7653,12 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MaximumClique", "ILP")[ A clique requires every pair of selected vertices to be adjacent; equivalently, no two selected vertices may share a _non_-edge. This is the independent set formulation on the complement graph $overline(G)$. ][ - _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. Constraints: $x_u + x_v <= 1$ for each $(u, v) in.not E$ (non-edges). Objective: maximize $sum_v x_v$. + _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. The ILP is: + $ + max quad & sum_v x_v \ + "subject to" quad & x_u + x_v <= 1 quad forall (u, v) in.not E \ + & x_v in {0, 1} quad forall v in V + $. _Correctness._ ($arrow.r.double$) In a clique, every pair of selected vertices is adjacent, so no non-edge constraint is violated. ($arrow.l.double$) Any feasible solution selects only mutually adjacent vertices, forming a clique; the objective maximizes its size. @@ -7609,6 +7701,15 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective._ Minimize $sum_(e in E) y_e$. + The ILP is: + $ + min quad & sum_(e in E) y_e \ + "subject to" quad & sum_(v in V) x_v = n / 2 \ + & y_e >= x_u - x_v quad forall e = (u, v) in E \ + & y_e >= x_v - x_u quad forall e = (u, v) in E \ + & x_v in {0, 1}, y_e in {0, 1} + $. + _Correctness._ ($arrow.r.double$) Given a balanced partition $(A, B)$, set $x_v = 1$ iff $v in B$, and set $y_e = 1$ iff edge $e$ has one endpoint in each side. The balance constraint holds because $|B| = n / 2$, and the linking inequalities hold because $|x_u - x_v| = 1$ exactly on crossing edges. The objective is therefore the cut size. ($arrow.l.double$) Any feasible ILP solution satisfies the balance equation, so exactly half the vertices have $x_v = 1$ when $n$ is even. 
For each edge, the linking inequalities imply $y_e >= |x_u - x_v|$; minimization therefore chooses $y_e = |x_u - x_v|$, making the objective count precisely the crossing edges of the extracted partition. _Solution extraction._ Return the first $n$ variables $(x_v)_(v in V)$ as the Graph Partitioning configuration; the edge-indicator variables are auxiliary. @@ -7645,11 +7746,13 @@ The following reductions to Integer Linear Programming are straightforward formu )[ A 0-1 Knapsack instance is already a binary Integer Linear Program @papadimitriou-steiglitz1982: each item-selection bit becomes a binary variable, the capacity condition is a single linear inequality, and the value objective is linear. The reduction preserves the number of decision variables exactly, producing an ILP with $n$ variables and one constraint. ][ - _Construction._ Given nonnegative weights $w_0, dots, w_(n-1)$, nonnegative values $v_0, dots, v_(n-1)$, and capacity $C$, introduce binary variables $x_0, dots, x_(n-1) in {0,1}$ where $x_i = 1$ iff item $i$ is selected. Construct the binary ILP: - $ max sum_(i=0)^(n-1) v_i x_i $ - subject to - $ sum_(i=0)^(n-1) w_i x_i <= C $ - and $x_i in {0,1}$ for all $i$. The target therefore has exactly $n$ variables and one linear constraint. + _Construction._ Given nonnegative weights $w_0, dots, w_(n-1)$, nonnegative values $v_0, dots, v_(n-1)$, and capacity $C$, introduce binary variables $x_0, dots, x_(n-1) in {0,1}$ where $x_i = 1$ iff item $i$ is selected. The ILP is: + $ + max quad & sum_(i=0)^(n-1) v_i x_i \ + "subject to" quad & sum_(i=0)^(n-1) w_i x_i <= C \ + & x_i in {0, 1} quad forall i in {0, dots, n - 1} + $. + The target therefore has exactly $n$ variables and one linear constraint. _Correctness._ ($arrow.r.double$) Any feasible knapsack solution $bold(x)$ satisfies $sum_i w_i x_i <= C$, so the same binary vector is feasible for the ILP and attains identical objective value $sum_i v_i x_i$. 
($arrow.l.double$) Any feasible binary ILP solution selects exactly the items with $x_i = 1$; the single inequality guarantees the chosen set fits in the knapsack, and the ILP objective equals the knapsack value. Therefore optimal solutions correspond one-to-one and preserve the optimum value. @@ -7690,6 +7793,14 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective:_ Minimize $sum_(j=0)^(n-1) y_j$. + The ILP is: + $ + min quad & sum_(j=0)^(n-1) y_j \ + "subject to" quad & sum_(j=0)^(n-1) x_(i j) = 1 quad forall i in {0, dots, n - 1} \ + & sum_(i=0)^(n-1) s_i x_(i j) <= C y_j quad forall j in {0, dots, n - 1} \ + & x_(i j), y_j in {0, 1} + $. + _Correctness._ ($arrow.r.double$) A valid packing assigns each item to exactly one bin (satisfying (1)); each bin's load is at most $C$ and $y_j = 1$ for any used bin (satisfying (2)). ($arrow.l.double$) Any feasible solution assigns each item to one bin by (1), respects capacity by (2), and the objective counts the number of open bins. _Solution extraction._ For each item $i$, find the unique $j$ with $x_(i j) = 1$; assign item $i$ to bin $j$. @@ -7711,6 +7822,15 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective._ Minimize 0. The target is a pure feasibility ILP, so any constant objective works. + The ILP is: + $ + "find" quad & (x_i)_(i = 0)^(m - 1) \ + "subject to" quad & sum_(a_i in I_j) x_i <= c_j quad forall j in {1, dots, k} \ + & sum_(a_i = (u, v) in A) x_i - sum_(a_i = (v, w) in A) x_i = 0 quad forall v in V backslash {s, t} \ + & sum_(a_i = (u, t) in A) x_i - sum_(a_i = (t, w) in A) x_i >= R \ + & x_i in ZZ_(>=0) quad forall i in {0, dots, m - 1} + $. + _Correctness._ ($arrow.r.double$) Any satisfying bundled flow assigns a non-negative integer to each arc, satisfies every bundle inequality by definition, satisfies every nonterminal conservation equality, and yields sink inflow at least $R$, so it is a feasible ILP solution. 
($arrow.l.double$) Any feasible ILP solution gives non-negative integral arc values obeying the same bundle, conservation, and sink-inflow constraints, hence it is a satisfying solution to the original Integral Flow with Bundles instance. _Solution extraction._ Identity: read the ILP vector $(x_0, dots, x_(m-1))$ directly as the arc-flow vector of the source problem. @@ -7733,6 +7853,16 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective._ Minimize $sum_j w_j C_j$. + The ILP is: + $ + min quad & sum_j w_j C_j \ + "subject to" quad & l_j <= C_j <= M quad forall j \ + & C_j - C_i >= l_j quad forall i prec.eq j \ + & C_j - C_i + M (1 - y_(i j)) >= l_j quad forall i < j \ + & C_i - C_j + M y_(i j) >= l_i quad forall i < j \ + & y_(i j) in {0, 1}, C_j in ZZ_(>=0) + $. + _Correctness._ ($arrow.r.double$) Any feasible schedule defines completion times and pairwise order values satisfying the bounds, precedence inequalities, and disjunctive machine constraints; its weighted completion time is exactly the ILP objective. ($arrow.l.double$) Any feasible ILP solution assigns a strict order to every task pair and forbids overlap, so the completion times correspond to a valid single-machine schedule that respects all precedences. Minimizing the ILP objective therefore minimizes the original weighted completion-time objective. _Solution extraction._ Sort tasks by their completion times $C_j$ and encode that order back into the source schedule representation. @@ -7802,6 +7932,18 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective:_ Minimize $sum_((u,v) in E) w(u,v) dot sum_k (y_(u,v,k) + y_(v,u,k))$. 
+ The ILP is: + $ + min quad & sum_((u,v) in E) w(u,v) sum_k (y_(u,v,k) + y_(v,u,k)) \ + "subject to" quad & sum_(k=0)^(n-1) x_(v,k) = 1 quad forall v in V \ + & sum_(v in V) x_(v,k) = 1 quad forall k in {0, dots, n - 1} \ + & x_(v,k) + x_(w,(k+1) mod n) <= 1 quad forall {v, w} in.not E, k in {0, dots, n - 1} \ + & y_(u,v,k) <= x_(u,k) quad forall (u, v) in E, k in {0, dots, n - 1} \ + & y_(u,v,k) <= x_(v,(k+1) mod n) quad forall (u, v) in E, k in {0, dots, n - 1} \ + & y_(u,v,k) >= x_(u,k) + x_(v,(k+1) mod n) - 1 quad forall (u, v) in E, k in {0, dots, n - 1} \ + & x_(v,k), y_(u,v,k) in {0, 1} + $. + _Correctness._ ($arrow.r.double$) A valid tour defines a permutation matrix $(x_(v,k))$ satisfying constraints (1)--(2); consecutive vertices are adjacent by construction, so (3) holds; McCormick constraints (4) force $y = x_(u,k) x_(v,k+1)$, making the objective equal to the tour cost. ($arrow.l.double$) Any feasible binary solution defines a permutation (by (1)--(2)) where consecutive positions are connected by edges (by (3)), forming a Hamiltonian tour; the linearized objective equals the tour cost. _Solution extraction._ For each position $k$, find vertex $v$ with $x_(v,k) = 1$ to recover the tour permutation; then select edges between consecutive positions. @@ -7839,6 +7981,19 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective._ Maximize $sum_({u,v} in E) l({u,v}) dot (x_(u,v) + x_(v,u))$. + The ILP is: + $ + max quad & sum_({u,v} in E) l({u,v}) (x_(u,v) + x_(v,u)) \ + "subject to" quad & sum_(w : {v,w} in E) x_(v,w) - sum_(u : {u,v} in E) x_(u,v) = b_v quad forall v in V \ + & sum_(w : {v,w} in E) x_(v,w) <= 1 quad forall v in V \ + & sum_(u : {u,v} in E) x_(u,v) <= 1 quad forall v in V \ + & x_(u,v) + x_(v,u) <= 1 quad forall {u, v} in E \ + & o_v - o_u >= 1 - n (1 - x_(u,v)) quad forall u -> v \ + & o_s = 0 \ + & x_(u,v) in {0, 1}, o_v in {0, dots, n - 1} + $, + where $b_s = 1$, $b_t = -1$, and $b_v = 0$ otherwise. 
+ _Correctness._ ($arrow.r.double$) Any simple $s$-$t$ path can be oriented from $s$ to $t$, giving exactly one outgoing arc at $s$, one incoming arc at $t$, balanced flow at every internal vertex, and strictly increasing order values along the path. ($arrow.l.double$) Any feasible ILP solution satisfies the flow equations and degree bounds, so the selected arcs form vertex-disjoint directed paths and cycles. The ordering inequalities make every selected arc increase the order value by at least 1, so directed cycles are impossible. The only remaining positive-flow component is therefore a single directed $s$-$t$ path, whose objective is exactly the total selected edge length. _Solution extraction._ For each undirected edge ${u, v}$, select it in the source configuration iff either $x_(u,v)$ or $x_(v,u)$ is 1. @@ -7890,6 +8045,16 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective:_ Use the zero objective. The target ILP is feasible iff the source LCS instance is a YES instance. + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(a in Sigma) x_(p, a) = 1 quad forall p in {1, dots, K} \ + & sum_(j = 1)^(|r_i|) y_(i, p, j) = 1 quad forall i, p \ + & y_(i, p, j) <= x_(p, a) quad forall i, p, j " with " r_i[j] = a \ + & y_(i, p, j') + y_(i, p + 1, j) <= 1 quad forall i, p, j' >= j \ + & x_(p, a), y_(i, p, j) in {0, 1} + $. + _Correctness._ ($arrow.r.double$) If a witness $w = w_1 dots w_K$ is a common subsequence of every string, set $x_(p, w_p) = 1$ and choose, in every $r_i$, the positions where that embedding occurs. Constraints (1)--(4) are satisfied, so the ILP is feasible. ($arrow.l.double$) Any feasible ILP solution selects exactly one symbol for each witness position and exactly one realization in each source string. Character consistency ensures the chosen positions spell the same witness string in every input string, and the ordering constraints ensure those positions are strictly increasing. 
Therefore the extracted witness is a common subsequence of length $K$. _Solution extraction._ For each witness position $p$, read the unique symbol $a$ with $x_(p, a) = 1$ and output the resulting length-$K$ string. @@ -7906,6 +8071,17 @@ The following reductions to Integer Linear Programming are straightforward formu _Objective:_ Minimize $sum_(e in E) w_e dot x_e$. + The ILP is: + $ + min quad & sum_(e in E) w_e x_e \ + "subject to" quad & y_(i, t_i) = 1 quad forall i in {0, dots, k - 1} \ + & y_(j, t_i) = 0 quad forall i != j \ + & sum_(i=0)^(k-1) y_(i v) = 1 quad forall v in V \ + & x_e >= y_(i u) - y_(i v) quad forall e = (u, v) in E, i in {0, dots, k - 1} \ + & x_e >= y_(i v) - y_(i u) quad forall e = (u, v) in E, i in {0, dots, k - 1} \ + & x_e, y_(i v) in {0, 1} + $. + _Correctness._ ($arrow.r.double$) A multiway cut $C$ partitions $V$ into $k$ components, one per terminal. Setting $y_(i v) = 1$ iff $v$ is in $t_i$'s component and $x_e = 1$ iff $e in C$ satisfies all constraints: partition by construction, terminal fixing by definition, and linking because any edge with endpoints in different components is in $C$. The objective equals the cut weight. ($arrow.l.double$) Any feasible ILP solution defines a valid partition (by constraint (2)) separating all terminals (by constraint (1)). The linking constraints (3) force $x_e = 1$ for all cross-component edges, so the objective is at least the multiway cut weight; minimization ensures optimality. _Solution extraction._ For each edge $e$ at index $"idx"$, read $x_e = x^*_(k n + "idx")$. The source configuration is $"config"[e] = x_e$ (1 = cut, 0 = keep). @@ -7955,6 +8131,15 @@ The following reductions to Integer Linear Programming are straightforward formu $ f^t_(u,v) <= y_e quad "and" quad f^t_(v,u) <= y_e. $ Binary flow variables suffice because any Steiner tree yields a unique simple root-to-terminal path for each commodity, so every commodity can be realized as a 0/1 path indicator. 
+ The ILP is: + $ + min quad & sum_(e in E) w_e y_e \ + "subject to" quad & sum_(u : (u, v) in A) f^t_(u,v) - sum_(u : (v, u) in A) f^t_(v,u) = b_(t,v) quad forall t in T backslash {r}, v in V \ + & f^t_(u,v) <= y_e quad forall t in T backslash {r}, e = {u, v} in E \ + & f^t_(v,u) <= y_e quad forall t in T backslash {r}, e = {u, v} in E \ + & y_e, f^t_(u,v) in {0, 1} + $, where $b_(t,t) = 1$, $b_(t,r) = -1$, and $b_(t,v) = 0$ otherwise. + _Correctness._ ($arrow.r.double$) If $S subset.eq E$ is a Steiner tree, set $y_e = 1$ exactly for $e in S$. For each non-root terminal $t$, the unique path from $r$ to $t$ inside the tree defines a binary flow assignment satisfying the conservation equations, and every used arc lies on a selected edge, so all linking inequalities hold. The ILP objective equals $sum_(e in S) w_e$. ($arrow.l.double$) Any feasible ILP solution with edge selector set $Y = {e in E : y_e = 1}$ supports one unit of flow from $r$ to every non-root terminal, so the selected edges contain a connected subgraph spanning all terminals. Because all edge weights are strictly positive, any cycle in the selected subgraph has positive total cost; the optimizer therefore never includes redundant edges, so the selected subgraph is already a Steiner tree. Therefore an optimal ILP solution induces a minimum-cost Steiner tree. _Variable mapping._ The first $m$ ILP variables are the source-edge indicators $y_0, dots, y_(m-1)$ in source edge order. For terminal $t_p$ with $p in {1, dots, k-1}$, the next block of $2 m$ variables stores the directed arc indicators $f^(t_p)_(u,v)$ and $f^(t_p)_(v,u)$ for each source edge $(u, v)$. @@ -7967,7 +8152,12 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MinimumHittingSet", "ILP")[ Each set must contain at least one selected element -- a standard set-covering constraint on the element indicators. ][ - _Construction._ Variables: $x_e in {0, 1}$ for each element $e in U$. Constraints: $sum_(e in S) x_e >= 1$ for each set $S in cal(S)$. 
Objective: minimize $sum_e x_e$. + _Construction._ Variables: $x_e in {0, 1}$ for each element $e in U$. The ILP is: + $ + min quad & sum_e x_e \ + "subject to" quad & sum_(e in S) x_e >= 1 quad forall S in cal(S) \ + & x_e in {0, 1} quad forall e in U + $. _Correctness._ ($arrow.r.double$) A hitting set includes at least one element from each set. ($arrow.l.double$) Any feasible solution hits every set. @@ -7977,7 +8167,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("ExactCoverBy3Sets", "ILP")[ Each element must be covered by exactly one triple, and the number of selected triples must equal $|U|\/3$. ][ - _Construction._ Variables: $x_j in {0, 1}$ for each triple $T_j$. Constraints: $sum_(j : e in T_j) x_j = 1$ for each $e in U$; $sum_j x_j = |U|\/3$. Objective: feasibility. + _Construction._ Variables: $x_j in {0, 1}$ for each triple $T_j$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(j : e in T_j) x_j = 1 quad forall e in U \ + & sum_j x_j = |U| / 3 \ + & x_j in {0, 1} quad forall j + $. _Correctness._ The equality constraints force each element to appear in exactly one selected triple, which is the definition of an exact cover. @@ -7987,7 +8183,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("NAESatisfiability", "ILP")[ Each clause must have at least one true and at least one false literal, encoded as a pair of linear inequalities per clause. ][ - _Construction._ Variables: $x_i in {0, 1}$ per Boolean variable. For each clause $C$ with literals $l_1, dots, l_k$, substitute $l_i = x_i$ for positive and $l_i = 1 - x_i$ for negative literals: (1) $sum "coeff"_i dot x_i >= 1 - "neg"$ (at least one true); (2) $sum "coeff"_i dot x_i <= |C| - 1 - "neg"$ (at least one false). Objective: feasibility. + _Construction._ Variables: $x_i in {0, 1}$ per Boolean variable. 
For each clause $C$ with literals $l_1, dots, l_k$, substitute $l_i = x_i$ for positive and $l_i = 1 - x_i$ for negative literals; writing $"neg"(C)$ for the number of negated literals in $C$ and $"coeff"_(C,i) = plus.minus 1$ for the sign of each occurrence of $x_i$, both clause conditions become linear. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_i "coeff"_(C,i) x_i >= 1 - "neg"(C) quad "for each clause" C \ + & sum_i "coeff"_(C,i) x_i <= |C| - 1 - "neg"(C) quad "for each clause" C \ + & x_i in {0, 1} quad forall i + $. _Correctness._ The two constraints per clause jointly enforce the not-all-equal condition. @@ -7997,7 +8199,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("KClique", "ILP")[ A $k$-clique requires at least $k$ selected vertices with no non-edge between any pair. ][ - _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. Constraints: $sum_v x_v >= k$; $x_u + x_v <= 1$ for each non-edge $(u, v) in.not E$. Objective: feasibility. + _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_v x_v >= k \ + & x_u + x_v <= 1 quad forall (u, v) in.not E \ + & x_v in {0, 1} quad forall v in V + $. _Correctness._ ($arrow.r.double$) A $k$-clique selects $>= k$ mutually adjacent vertices, satisfying all constraints. ($arrow.l.double$) Any feasible solution selects $>= k$ vertices with no non-edge pair, forming a clique of size $>= k$. @@ -8007,7 +8215,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MaximalIS", "ILP")[ An independent set that is also maximal: no vertex outside the set can be added without violating independence. ][ - _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. Constraints: (1) Independence: $x_u + x_v <= 1$ for each edge $(u, v) in E$. (2) Maximality: $x_v + sum_(u in N(v)) x_u >= 1$ for each $v in V$. Objective: maximize $sum_v w_v x_v$. + _Construction._ Variables: $x_v in {0, 1}$ for each $v in V$. 
The ILP is: + $ + max quad & sum_v w_v x_v \ + "subject to" quad & x_u + x_v <= 1 quad forall (u, v) in E \ + & x_v + sum_(u in N(v)) x_u >= 1 quad forall v in V \ + & x_v in {0, 1} quad forall v in V + $. _Correctness._ Independence constraints prevent adjacent selections; maximality constraints ensure every vertex is either selected or has a selected neighbor. @@ -8017,7 +8231,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("PartiallyOrderedKnapsack", "ILP")[ Standard knapsack with precedence constraints: item $b$ can only be selected if item $a$ is also selected for each precedence $(a, b)$. ][ - _Construction._ Variables: $x_i in {0, 1}$ per item. Constraints: $sum_i w_i x_i <= C$ (capacity); $x_b <= x_a$ for each precedence $(a, b)$. Objective: maximize $sum_i v_i x_i$. + _Construction._ Variables: $x_i in {0, 1}$ per item. The ILP is: + $ + max quad & sum_i v_i x_i \ + "subject to" quad & sum_i w_i x_i <= C \ + & x_b <= x_a quad "for each precedence" (a, b) \ + & x_i in {0, 1} quad forall i + $. _Correctness._ Capacity and precedence constraints are directly linear. Any feasible ILP solution is a valid knapsack packing respecting the partial order. @@ -8027,7 +8247,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("RectilinearPictureCompression", "ILP")[ Cover all 1-cells with at most $B$ maximal all-1 rectangles. ][ - _Construction._ Variables: $x_r in {0, 1}$ per maximal rectangle $r$. Constraints: $sum_(r "covers" (i,j)) x_r >= 1$ for each 1-cell $(i, j)$; $sum_r x_r <= B$. Objective: feasibility. + _Construction._ Variables: $x_r in {0, 1}$ per maximal rectangle $r$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(r "covers" (i,j)) x_r >= 1 quad forall (i, j) "with source cell" = 1 \ + & sum_r x_r <= B \ + & x_r in {0, 1} quad forall r + $. 
_Correctness._ Coverage constraints ensure every 1-cell is covered; the cardinality bound limits the number of rectangles. @@ -8035,19 +8261,38 @@ The following reductions to Integer Linear Programming are straightforward formu ] #reduction-rule("ShortestWeightConstrainedPath", "ILP")[ - Find an $s$-$t$ path satisfying both length and weight bounds, using directed arc variables with MTZ ordering to prevent subtours. + Find an $s$-$t$ path satisfying both length and weight bounds, using directed arc variables with MTZ ordering $o_v - o_u >= 1 - M (1 - a_(u,v))$ on selected arcs to prevent subtours. ][ - _Construction._ Variables: binary $a_(e,d) in {0, 1}$ per edge $e$ per direction $d in {0, 1}$ (forward/reverse), plus integer $o_v in {0, dots, n-1}$ per vertex. Constraints: flow balance at each vertex (net out-in = 1 at $s$, $-1$ at $t$, 0 elsewhere); degree bounds; at most one direction per edge; MTZ ordering $o_v - o_u >= 1 - M dot a_(e,0)$ for each directed arc; length bound $sum_e l_e (a_(e,0) + a_(e,1)) <= L$; weight bound $sum_e w_e (a_(e,0) + a_(e,1)) <= W$. Objective: feasibility. - - _Correctness._ Flow balance forces an $s$-$t$ path; MTZ ordering eliminates subtours; bound constraints enforce the length and weight limits. - - _Solution extraction._ Edge $e$ is selected iff $a_(e,0) + a_(e,1) > 0$. + _Construction._ Let $A$ contain both orientations of every undirected edge, let $L = K$ be the source length bound, and let $M = n$. Variables: binary $a_(u,v) in {0, 1}$ for each directed arc $(u, v) in A$, plus integer $o_v in {0, dots, n-1}$ per vertex. 
The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(w : (v, w) in A) a_(v,w) - sum_(u : (u, v) in A) a_(u,v) = b_v quad forall v in V \ + & sum_(w : (v, w) in A) a_(v,w) <= 1 quad forall v in V \ + & sum_(u : (u, v) in A) a_(u,v) <= 1 quad forall v in V \ + & a_(u,v) + a_(v,u) <= 1 quad forall {u, v} in E \ + & o_v - o_u >= 1 - M (1 - a_(u,v)) quad forall (u, v) in A \ + & sum_((u,v) in A) l_(u,v) a_(u,v) <= L \ + & sum_((u,v) in A) w_(u,v) a_(u,v) <= W \ + & a_(u,v) in {0, 1}, o_v in {0, dots, n - 1} + $, + where $b_s = 1$, $b_t = -1$, and $b_v = 0$ otherwise. + + _Correctness._ Flow balance forces an $s$-$t$ path; the MTZ inequalities apply only on selected arcs and therefore eliminate subtours; bound constraints enforce the length and weight limits. + + _Solution extraction._ Edge $\{u, v\}$ is selected iff $a_(u,v) + a_(v,u) > 0$. ] #reduction-rule("MultipleCopyFileAllocation", "ILP")[ Place file copies at vertices to minimize total storage plus weighted access cost, subject to a budget constraint. ][ - _Construction._ Variables: binary $x_v$ (copy at $v$) and $y_(v,u)$ (vertex $v$ served by copy at $u$). Constraints: $sum_u y_(v,u) = 1$ (assignment); $y_(v,u) <= x_u$ (capacity link); $sum_v s_v x_v + sum_(v,u) "usage"_v dot d(v, u) dot y_(v,u) <= B$ (budget). Objective: feasibility. + _Construction._ Variables: binary $x_v$ (copy at $v$) and $y_(v,u)$ (vertex $v$ served by copy at $u$). The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_u y_(v,u) = 1 quad forall v \ + & y_(v,u) <= x_u quad forall v, u \ + & sum_v s_v x_v + sum_(v,u) "usage"_v d(v, u) y_(v,u) <= B \ + & x_v, y_(v,u) in {0, 1} + $. _Correctness._ Assignment constraints ensure each vertex is served by exactly one copy; capacity links prevent assignment to non-copy vertices; the budget constraint linearizes the total cost. 
@@ -8057,7 +8302,14 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MinimumSumMulticenter", "ILP")[ Select $k$ centers and assign each vertex to a center, minimizing the total weighted distance. ][ - _Construction._ Variables: binary $x_j$ (vertex $j$ is center), $y_(i,j)$ (vertex $i$ assigned to center $j$). Constraints: $sum_j x_j = k$; $y_(i,j) <= x_j$; $sum_j y_(i,j) = 1$. Objective: minimize $sum_(i,j) w_i dot d(i, j) dot y_(i,j)$. + _Construction._ Variables: binary $x_j$ (vertex $j$ is center), $y_(i,j)$ (vertex $i$ assigned to center $j$). The ILP is: + $ + min quad & sum_(i,j) w_i d(i, j) y_(i,j) \ + "subject to" quad & sum_j x_j = k \ + & y_(i,j) <= x_j quad forall i, j \ + & sum_j y_(i,j) = 1 quad forall i \ + & x_j, y_(i,j) in {0, 1} + $. _Correctness._ The assignment structure and cardinality constraint directly encode the $k$-median objective with precomputed shortest-path distances. @@ -8067,7 +8319,15 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MinMaxMulticenter", "ILP")[ Select $k$ centers such that the maximum weighted distance from any vertex to its assigned center is at most $B$. ][ - _Construction._ Same assignment structure as MinimumSumMulticenter, plus per-vertex bound constraints: $sum_j w_i dot d(i, j) dot y_(i,j) <= B$ for each $i$. Objective: feasibility. + _Construction._ Same assignment structure as MinimumSumMulticenter, plus per-vertex bound constraints. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_j x_j = k \ + & y_(i,j) <= x_j quad forall i, j \ + & sum_j y_(i,j) = 1 quad forall i \ + & sum_j w_i d(i, j) y_(i,j) <= B quad forall i \ + & x_j, y_(i,j) in {0, 1} + $. _Correctness._ The additional per-vertex constraints enforce the minimax bound on weighted assignment distances. 
@@ -8077,7 +8337,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MultiprocessorScheduling", "ILP")[ Assign tasks to processors so that no processor's total load exceeds the deadline. ][ - _Construction._ Variables: binary $x_(j,p)$ (task $j$ on processor $p$), one-hot per task. Constraints: $sum_p x_(j,p) = 1$ (each task assigned); $sum_j l_j dot x_(j,p) <= D$ for each processor $p$. Objective: feasibility. + _Construction._ Variables: binary $x_(j,p)$ (task $j$ on processor $p$), one-hot per task. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_p x_(j,p) = 1 quad forall j \ + & sum_j l_j x_(j,p) <= D quad forall p \ + & x_(j,p) in {0, 1} + $. _Correctness._ One-hot constraints ensure each task is assigned to exactly one processor; load constraints enforce the deadline on every processor. @@ -8087,7 +8353,14 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("CapacityAssignment", "ILP")[ Assign a capacity level to each link so that total cost and total delay stay within their budgets. ][ - _Construction._ Variables: binary $x_(l,c)$ (link $l$ gets capacity $c$), one-hot per link. Constraints: $sum_c x_(l,c) = 1$ (each link gets one capacity); $sum_(l,c) "cost"[l][c] dot x_(l,c) <= C$; $sum_(l,c) "delay"[l][c] dot x_(l,c) <= D$. Objective: feasibility. + _Construction._ Variables: binary $x_(l,c)$ (link $l$ gets capacity $c$), one-hot per link. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_c x_(l,c) = 1 quad forall l \ + & sum_(l,c) "cost"[l][c] x_(l,c) <= C \ + & sum_(l,c) "delay"[l][c] x_(l,c) <= D \ + & x_(l,c) in {0, 1} + $. _Correctness._ One-hot constraints fix one capacity per link; the two budget constraints are linear in the indicators. 
@@ -8097,7 +8370,16 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("ExpectedRetrievalCost", "ILP")[ Assign records to sectors to minimize expected retrieval cost, using product linearization for the quadratic cost terms. ][ - _Construction._ Variables: binary $x_(r,s)$ (record $r$ in sector $s$), one-hot per record, plus linearization variables $z_((r,s),(r',s')) = x_(r,s) dot x_(r',s')$. Constraints: one-hot assignment; McCormick linearization ($z <= x$, $z <= y$, $z >= x + y - 1$); cost bound $sum "cost" dot z <= B$. Objective: feasibility. + _Construction._ Variables: binary $x_(r,s)$ (record $r$ in sector $s$), one-hot per record, plus linearization variables $z_((r,s),(r',s')) = x_(r,s) dot x_(r',s')$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_s x_(r,s) = 1 quad forall r \ + & z_((r,s),(r',s')) <= x_(r,s) quad forall r, s, r', s' \ + & z_((r,s),(r',s')) <= x_(r',s') quad forall r, s, r', s' \ + & z_((r,s),(r',s')) >= x_(r,s) + x_(r',s') - 1 quad forall r, s, r', s' \ + & sum "cost" dot z <= B \ + & x_(r,s), z_((r,s),(r',s')) in {0, 1} + $. _Correctness._ McCormick constraints force $z$ to equal the product of binary indicators, linearizing the quadratic cost. @@ -8107,7 +8389,14 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("PartitionIntoTriangles", "ILP")[ Partition vertices into groups of 3 such that each group forms a triangle in the graph. ][ - _Construction._ Variables: binary $x_(v,g)$ (vertex $v$ in group $g$), one-hot per vertex, $q = n\/3$ groups. Constraints: $sum_g x_(v,g) = 1$; $sum_v x_(v,g) = 3$ for each $g$; $x_(u,g) + x_(v,g) <= 1$ for each group $g$ and non-edge $(u, v)$. Objective: feasibility. + _Construction._ Variables: binary $x_(v,g)$ (vertex $v$ in group $g$), one-hot per vertex, $q = n\/3$ groups. 
The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_g x_(v,g) = 1 quad forall v \ + & sum_v x_(v,g) = 3 quad forall g in {1, dots, q} \ + & x_(u,g) + x_(v,g) <= 1 quad forall g in {1, dots, q}, (u, v) in.not E \ + & x_(v,g) in {0, 1} + $. _Correctness._ Size-3 groups with no non-edge pair within any group forces each group to be a triangle. @@ -8117,7 +8406,17 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("PartitionIntoPathsOfLength2", "ILP")[ Partition vertices into groups of 3 such that each group induces a path of length 2 (at least 2 edges within the group). ][ - _Construction._ Variables: binary $x_(v,g)$ plus product linearization variables $z_((u,v),g) = x_(u,g) dot x_(v,g)$ for edges $(u, v)$. Constraints: one-hot vertex assignment; group size = 3; $sum_("edges" (u,v)) z_((u,v),g) >= 2$ per group (at least 2 edges); McCormick for $z$. Objective: feasibility. + _Construction._ Variables: binary $x_(v,g)$ plus product linearization variables $z_((u,v),g) = x_(u,g) dot x_(v,g)$ for edges $(u, v)$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_g x_(v,g) = 1 quad forall v \ + & sum_v x_(v,g) = 3 quad forall g \ + & sum_((u,v) in E) z_((u,v),g) >= 2 quad forall g \ + & z_((u,v),g) <= x_(u,g) quad forall (u, v) in E, g \ + & z_((u,v),g) <= x_(v,g) quad forall (u, v) in E, g \ + & z_((u,v),g) >= x_(u,g) + x_(v,g) - 1 quad forall (u, v) in E, g \ + & x_(v,g), z_((u,v),g) in {0, 1} + $. _Correctness._ The edge count constraint ensures connectivity within each group. Combined with group size 3, this forces a path of length 2. @@ -8127,7 +8426,16 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("SumOfSquaresPartition", "ILP")[ Partition elements into groups such that $sum_g (sum_(i in g) s_i)^2 <= B$. ][ - _Construction._ Variables: binary $x_(i,g)$ (element $i$ in group $g$), plus $z_((i,j),g) = x_(i,g) dot x_(j,g)$. 
Constraints: one-hot assignment; McCormick linearization for $z$; $sum_g sum_(i,j) s_i s_j z_((i,j),g) <= B$. Objective: feasibility. + _Construction._ Variables: binary $x_(i,g)$ (element $i$ in group $g$), plus $z_((i,j),g) = x_(i,g) dot x_(j,g)$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_g x_(i,g) = 1 quad forall i \ + & z_((i,j),g) <= x_(i,g) quad forall i, j, g \ + & z_((i,j),g) <= x_(j,g) quad forall i, j, g \ + & z_((i,j),g) >= x_(i,g) + x_(j,g) - 1 quad forall i, j, g \ + & sum_g sum_(i,j) s_i s_j z_((i,j),g) <= B \ + & x_(i,g), z_((i,j),g) in {0, 1} + $. _Correctness._ Product linearization captures the quadratic sum-of-squares objective; the bound constraint enforces the partition quality. @@ -8137,7 +8445,14 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("PrecedenceConstrainedScheduling", "ILP")[ Assign unit-length tasks to time slots on $m$ processors, respecting precedence constraints and a deadline. ][ - _Construction._ Variables: binary $x_(j,t)$ (task $j$ at time $t$), one-hot per task. Constraints: $sum_t x_(j,t) = 1$; $sum_j x_(j,t) <= m$ (processor capacity per slot); $sum_t t dot x_(j,t) >= sum_t t dot x_(i,t) + 1$ for each precedence $(i, j)$. Objective: feasibility. + _Construction._ Variables: binary $x_(j,t)$ (task $j$ at time $t$), one-hot per task. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_t x_(j,t) = 1 quad forall j \ + & sum_j x_(j,t) <= m quad forall t \ + & sum_t t x_(j,t) >= sum_t t x_(i,t) + 1 quad "for each precedence" (i, j) \ + & x_(j,t) in {0, 1} + $. _Correctness._ One-hot ensures each task is scheduled once; capacity limits processors per slot; precedence is linearized via weighted time indicators. 
@@ -8147,7 +8462,14 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("SchedulingWithIndividualDeadlines", "ILP")[ Schedule unit-length tasks on $m$ processors, each task $j$ must complete before its individual deadline $d_j$. ][ - _Construction._ Variables: binary $x_(j,t)$ (task $j$ at time $t in {0, dots, d_j - 1}$), one-hot per task. Constraints: $sum_t x_(j,t) = 1$; $sum_j x_(j,t) <= m$; precedence constraints as in PrecedenceConstrainedScheduling. Objective: feasibility. + _Construction._ Variables: binary $x_(j,t)$ (task $j$ at time $t in {0, dots, d_j - 1}$), one-hot per task. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(t = 0)^(d_j - 1) x_(j,t) = 1 quad forall j \ + & sum_j x_(j,t) <= m quad forall t \ + & sum_t t x_(j,t) >= sum_t t x_(i,t) + 1 quad "for each precedence" (i, j) \ + & x_(j,t) in {0, 1} + $. _Correctness._ Per-task deadline is enforced by restricting the time domain of each task's indicator variables. @@ -8157,7 +8479,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("SequencingWithinIntervals", "ILP")[ Schedule tasks with release times, deadlines, and processing lengths on a single machine without overlap. ][ - _Construction._ Variables: binary $x_(j,t)$ (task $j$ starts at time $t in [r_j, d_j - l_j]$), one-hot per task. Constraints: $sum_t x_(j,t) = 1$; non-overlap: $sum_(j "active at" t) 1 <= 1$ (expanded via start-time indicators). Objective: feasibility. + _Construction._ Variables: binary $x_(j,t)$ (task $j$ starts at time $t in [r_j, d_j - l_j]$), one-hot per task. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(t = r_j)^(d_j - l_j) x_(j,t) = 1 quad forall j \ + & sum_(j, t : t <= tau < t + l_j) x_(j,t) <= 1 quad forall tau \ + & x_(j,t) in {0, 1} + $. _Correctness._ One-hot ensures each task starts once within its feasible window; non-overlap prevents simultaneous execution. 
@@ -8167,7 +8495,13 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("MinimumFeedbackArcSet", "ILP")[ Remove minimum-weight arcs to make a directed graph acyclic, using MTZ-style ordering to enforce acyclicity among kept arcs. ][ - _Construction._ Variables: binary $y_a in {0, 1}$ per arc ($y_a = 1$ iff removed), integer $o_v in {0, dots, n-1}$ per vertex. Constraints: for each arc $a = (u -> v)$: $o_v - o_u >= 1 - n dot y_a$; $y_a <= 1$; $o_v <= n - 1$. Objective: minimize $sum_a w_a y_a$. + _Construction._ Variables: binary $y_a in {0, 1}$ per arc ($y_a = 1$ iff removed), integer $o_v in {0, dots, n-1}$ per vertex. The ILP is: + $ + min quad & sum_a w_a y_a \ + "subject to" quad & o_v - o_u >= 1 - n y_a quad forall a = (u -> v) \ + & y_a in {0, 1} quad forall a \ + & o_v in {0, dots, n - 1} quad forall v + $. _Correctness._ ($arrow.r.double$) Removing a FAS leaves a DAG with a topological ordering satisfying all constraints. ($arrow.l.double$) Among kept arcs, the ordering variables enforce acyclicity: a cycle would require $o_(v_1) < dots < o_(v_k) < o_(v_1)$, a contradiction. @@ -8177,7 +8511,15 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("UndirectedTwoCommodityIntegralFlow", "ILP")[ Route two commodities on an undirected graph with shared edge capacities, using direction indicators to enforce anti-parallel flow constraints. ][ - _Construction._ Variables: integer flow variables $f^k_(u,v), f^k_(v,u)$ per edge per commodity ($k in {1, 2}$), plus binary direction indicators $d^k_e$. Constraints: capacity sharing via direction indicators; anti-parallel flow (at most one direction per commodity per edge); flow conservation per commodity per vertex; demand satisfaction at sinks. Objective: feasibility. + _Construction._ Variables: integer flow variables $f^k_(u,v), f^k_(v,u)$ per edge per commodity ($k in {1, 2}$), plus binary direction indicators $d^k_e$. 
The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & f^k_(u,v) <= "cap"_e d^k_e quad forall e = {u, v} in E, k in {1, 2} \ + & f^k_(v,u) <= "cap"_e (1 - d^k_e) quad forall e = {u, v} in E, k in {1, 2} \ + & sum_(k=1)^2 (f^k_(u,v) + f^k_(v,u)) <= "cap"_e quad forall e = {u, v} in E \ + & sum_(w) f^k_(v,w) - sum_(u) f^k_(u,v) = b^k_v quad forall k in {1, 2}, v in V \ + & d^k_e in {0, 1}, f^k_(u,v) in ZZ_(>=0) + $. _Correctness._ Direction indicators linearize the capacity-sharing constraint; flow conservation and demand constraints ensure valid multi-commodity flow. @@ -8187,7 +8529,14 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("DirectedTwoCommodityIntegralFlow", "ILP")[ Route two commodities on a directed graph with shared arc capacities. ][ - _Construction._ Variables: integer $f^1_a, f^2_a >= 0$ per arc $a$. Constraints: $f^1_a + f^2_a <= "cap"(a)$; flow conservation per commodity per vertex; demand at sinks $>= R_k$. Objective: feasibility. + _Construction._ Variables: integer $f^1_a, f^2_a >= 0$ per arc $a$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & f^1_a + f^2_a <= "cap"(a) quad forall a in A \ + & sum_(a in delta^+(v)) f^k_a - sum_(a in delta^-(v)) f^k_a = b^k_v quad forall k in {1, 2}, v in V \ + & sum_(a in delta^-(t_k)) f^k_a - sum_(a in delta^+(t_k)) f^k_a >= R_k quad forall k in {1, 2} \ + & f^1_a, f^2_a in ZZ_(>=0) quad forall a in A + $. _Correctness._ Joint capacity and conservation constraints directly encode the two-commodity flow problem. @@ -8197,13 +8546,1101 @@ The following reductions to Integer Linear Programming are straightforward formu #reduction-rule("UndirectedFlowLowerBounds", "ILP")[ Find a feasible single-commodity flow on an undirected graph with both upper and lower capacity bounds per edge. ][ - _Construction._ Variables: integer $f_(u,v), f_(v,u) >= 0$ per edge, plus direction indicator $z_e in {0, 1}$. 
Constraints: $z_e <= 1$; $f_(u,v) <= "cap"_e dot z_e$; $f_(v,u) <= "cap"_e (1 - z_e)$; $f_(u,v) >= "lower"_e dot z_e$; $f_(v,u) >= "lower"_e (1 - z_e)$; flow conservation; demand at sink. Objective: feasibility. + _Construction._ Variables: integer $f_(u,v), f_(v,u) >= 0$ per edge, plus direction indicator $z_e in {0, 1}$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & f_(u,v) <= "cap"_e z_e quad forall e = {u, v} in E \ + & f_(v,u) <= "cap"_e (1 - z_e) quad forall e = {u, v} in E \ + & f_(u,v) >= "lower"_e z_e quad forall e = {u, v} in E \ + & f_(v,u) >= "lower"_e (1 - z_e) quad forall e = {u, v} in E \ + & sum_(a in delta^+(v)) f_a - sum_(a in delta^-(v)) f_a = b_v quad forall v in V \ + & sum_(a in delta^-(t)) f_a - sum_(a in delta^+(t)) f_a >= R \ + & z_e in {0, 1}, f_(u,v) in ZZ_(>=0) + $. _Correctness._ Direction indicators force flow in one direction per edge; bounds enforce both upper and lower capacity limits. _Solution extraction._ Edge orientations: $z_e$ values. ] +// Flow-based + +#reduction-rule("IntegralFlowHomologousArcs", "ILP")[ + Use one integer flow variable per arc, with standard conservation plus equality constraints on every homologous pair. +][ + _Construction._ Variables: integer $f_a >= 0$ per arc $a in A$. The ILP is: + $ + "find" quad & (f_a)_(a in A) \ + "subject to" quad & f_a <= c_a quad forall a in A \ + & sum_(a in delta^-(v)) f_a = sum_(a in delta^+(v)) f_a quad forall v in V backslash {s, t} \ + & f_a = f_b quad forall "homologous pairs" (a, b) \ + & sum_(a in delta^-(t)) f_a - sum_(a in delta^+(t)) f_a >= R \ + & f_a in ZZ_(>=0) quad forall a in A + $. + + _Correctness._ ($arrow.r.double$) Any feasible integral flow already satisfies the capacity, conservation, equality, and sink-demand constraints. ($arrow.l.double$) Any feasible ILP assignment is exactly an integral arc-flow meeting the homologous-pair and requirement conditions. + + _Solution extraction._ Output the arc-flow vector $(f_a)_(a in A)$ in the source arc order. 
+] + +#reduction-rule("IntegralFlowWithMultipliers", "ILP")[ + The source constraints are linear after writing one integer flow variable per arc and enforcing multiplier-scaled conservation at each non-terminal. +][ + _Construction._ Variables: integer $f_a >= 0$ per arc $a = (u -> v)$. The ILP is: + $ + "find" quad & (f_a)_(a in A) \ + "subject to" quad & f_a <= c_a quad forall a in A \ + & sum_(a in delta^+(v)) f_a = h(v) sum_(a in delta^-(v)) f_a quad forall v in V backslash {s, t} \ + & sum_(a in delta^-(t)) f_a - sum_(a in delta^+(t)) f_a >= R \ + & f_a in ZZ_(>=0) quad forall a in A + $. + + _Correctness._ ($arrow.r.double$) A valid multiplier flow satisfies these linear equalities and inequalities by definition. ($arrow.l.double$) Any feasible ILP solution gives an integral arc flow whose non-terminal outflow equals the prescribed multiple of its inflow and whose sink inflow meets the requirement. + + _Solution extraction._ Output the arc-flow vector $(f_a)_(a in A)$. +] + +#reduction-rule("PathConstrainedNetworkFlow", "ILP")[ + Because flow may use only the prescribed $s$-$t$ paths, it suffices to assign an integer amount to each allowed path and aggregate those loads on every arc. +][ + _Construction._ Let $P_1, dots, P_q$ be the prescribed paths. Variables: integer $f_i >= 0$ for each path $P_i$. The ILP is: + $ + "find" quad & (f_i)_(i = 1)^q \ + "subject to" quad & sum_(i : a in P_i) f_i <= c_a quad forall a in A \ + & sum_i f_i >= R \ + & f_i in ZZ_(>=0) quad forall i in {1, dots, q} + $. + + _Correctness._ ($arrow.r.double$) Any valid path-flow assignment respects every arc capacity and delivers at least $R$ units in total. ($arrow.l.double$) Any feasible ILP solution assigns integral flow only to the prescribed paths, and the aggregated arc loads satisfy the network capacities. + + _Solution extraction._ Output the path-flow vector $(f_1, dots, f_q)$. 
+] + +#reduction-rule("DisjointConnectingPaths", "ILP")[ + Route one unit of flow for each terminal pair on an oriented copy of the graph, and forbid internal vertices from carrying more than one commodity. +][ + _Construction._ For terminal pairs $(s_k, t_k)$, variables: binary $f^k_(u,v)$ on each orientation of each edge and integer order variables $h^k_v$, with big-$M$ constant $M = n$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(w) f^k_(s_k,w) - sum_(u) f^k_(u,s_k) = 1 quad forall k \ + & sum_(u) f^k_(u,t_k) - sum_(w) f^k_(t_k,w) = 1 quad forall k \ + & sum_(w) f^k_(v,w) - sum_(u) f^k_(u,v) = 0 quad forall k, v in V backslash {s_k, t_k} \ + & f^k_(u,v) + f^k_(v,u) <= 1 quad forall {u, v} in E, k \ + & sum_k sum_(w in N(v)) f^k_(v,w) <= 1 quad forall "non-terminal" v \ + & h^k_v >= h^k_u + 1 - M (1 - f^k_(u,v)) quad forall k, u -> v \ + & f^k_(u,v) in {0, 1}, h^k_v in ZZ_(>=0) + $. + + _Correctness._ ($arrow.r.double$) A family of pairwise internally vertex-disjoint connecting paths orients each path from its source to its sink and satisfies all constraints. ($arrow.l.double$) The conservation, disjointness, and ordering constraints force each commodity to trace one simple path, and different commodities can intersect only at terminals. + + _Solution extraction._ Mark an edge selected in the source config iff some orientation of that edge carries flow for some commodity. +] + +#reduction-rule("LengthBoundedDisjointPaths", "ILP")[ + Use one unit-flow commodity for each requested path and add hop variables so every chosen path has at most the source bound $K$ edges. +][ + _Construction._ Variables: binary $f^k_(u,v)$ on each orientation of each edge for each path slot $k$, plus integer hop variables $h^k_v in {0, dots, K}$, where $K$ is the path-length bound and $M = K + 1$. 
The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(w) f^k_(s,w) - sum_(u) f^k_(u,s) = 1 quad forall k \ + & sum_(u) f^k_(u,t) - sum_(w) f^k_(t,w) = 1 quad forall k \ + & sum_(w) f^k_(v,w) - sum_(u) f^k_(u,v) = 0 quad forall k, v in V backslash {s, t} \ + & f^k_(u,v) + f^k_(v,u) <= 1 quad forall {u, v} in E, k \ + & sum_k sum_(w in N(v)) f^k_(v,w) <= 1 quad forall v in V backslash {s, t} \ + & h^k_s = 0 quad forall k \ + & h^k_v >= h^k_u + 1 - M (1 - f^k_(u,v)) quad forall k, u -> v \ + & h^k_t <= K quad forall k \ + & f^k_(u,v) in {0, 1}, h^k_v in {0, dots, K} + $. + + _Correctness._ ($arrow.r.double$) A collection of $J$ internally disjoint $s$-$t$ paths of length at most $K$ yields feasible commodity flows and consistent hop labels. ($arrow.l.double$) The flow and hop constraints force each commodity to be a simple $s$-$t$ path, while the vertex-disjointness inequalities match the source requirement. + + _Solution extraction._ For each path slot $k$, set the source vertex-indicator block to 1 exactly on the vertices incident to the commodity-$k$ path, including $s$ and $t$. +] + +#reduction-rule("MixedChinesePostman", "ILP")[ + Choose an orientation for every undirected edge, then add integer traversal variables on the available directed arcs to balance the oriented required multigraph within the length bound. +][ + _Construction._ Let $n = |V|$, let the original directed arcs be $A = {a_0, dots, a_(m-1)}$ with $a_i = (alpha_i, beta_i)$, and let the undirected edges be $E = {e_0, dots, e_(q-1)}$ with $e_k = {u_k, v_k}$. Set $R = m + q$. If $R = 0$, return the empty feasible ILP: the empty walk already has length 0. Otherwise form the available directed-arc list + $A^* = {b_0, dots, b_(L-1)}$ with $L = m + 2 q$, + where $b_i = a_i$ for $0 <= i < m$, $b_(m + 2 k) = (u_k, v_k)$, and $b_(m + 2 k + 1) = (v_k, u_k)$. + Write $b_j = ("tail"_j, "head"_j)$ and let $ell_j$ be the corresponding length. 
Use `ILP` with binary variables encoded by bounds $0 <= x <= 1$. Order the variables as + $(d_0, dots, d_(q-1), g_0, dots, g_(L-1), y_0, dots, y_(L-1), z_0, dots, z_(n-1), rho_0, dots, rho_(n-1), s, b_0, dots, b_(n-1), f_0, dots, f_(L-1), h_0, dots, h_(L-1))$, + so $d_k$ has index $k$, $g_j$ has index $q + j$, $y_j$ has index $q + L + j$, $z_v$ has index $q + 2 L + v$, $rho_v$ has index $q + 2 L + n + v$, $s$ has index $q + 2 L + 2 n$, $b_v$ has index $q + 2 L + 2 n + 1 + v$, $f_j$ has index $q + 2 L + 3 n + 1 + j$, and $h_j$ has index $q + 3 L + 3 n + 1 + j$. There are $q + 4 L + 3 n + 1$ variables in total. + + The orientation bit $d_k in {0, 1}$ means $d_k = 0$ chooses $u_k -> v_k$ and $d_k = 1$ chooses $v_k -> u_k$. Define the required multiplicity on each available arc explicitly by + $r_i(d) = 1$ for $0 <= i < m$, + $r_(m + 2 k)(d) = 1 - d_k$, + and $r_(m + 2 k + 1)(d) = d_k$. + Thus the two oriented copies of each undirected edge are already linear in the orientation bit. + + Let $G = R (n - 1)$ and $M_"use" = 1 + G$. The variable $g_j in {0, dots, G}$ counts extra traversals of $b_j$ beyond the required multiplicity, so the total multiplicity of $b_j$ is $r_j(d) + g_j$. The bound $G = R (n - 1)$ is exact for this formulation: any closed walk can be shortcut so that between consecutive required traversals it uses a simple connector path of at most $n - 1$ arcs, and there are exactly $R$ such connector segments in the cyclic order of the required traversals. 
+ + The constraints are: + $sum_(j : "tail"_j = v) (r_j(d) + g_j) - sum_(j : "head"_j = v) (r_j(d) + g_j) = 0$ for every $v in V$; + $r_j(d) + g_j <= M_"use" y_j$ and $y_j <= r_j(d) + g_j$ for every $j in {0, dots, L - 1}$, so $y_j = 1$ iff arc $b_j$ is used at least once; + $y_j <= z_"tail"_j$ and $y_j <= z_"head"_j$ for every $j$, and $z_v <= sum_(j : "tail"_j = v " or " "head"_j = v) y_j$ for every vertex $v$, so $z_v = 1$ iff $v$ is incident to some used arc; + $s = sum_v z_v$; + $sum_v rho_v = 1$, $rho_v <= z_v$ for every $v$, and the product linearization $b_v <= s$, $b_v <= n rho_v$, $b_v >= s - n (1 - rho_v)$, $b_v >= 0$ for every $v$, so $b_v = s rho_v$ and therefore the unique root chosen by $rho$ supplies $s - 1$ units of connectivity flow; + $0 <= f_j <= (n - 1) y_j$ and $0 <= h_j <= (n - 1) y_j$ for every available arc $b_j$; here the exact big-$M$ for arc activation is $n - 1$, because at most one unit is demanded by each non-root active vertex; + $sum_(j : "tail"_j = v) f_j - sum_(j : "head"_j = v) f_j = b_v - z_v$ for every vertex $v$; and + $sum_(j : "head"_j = v) h_j - sum_(j : "tail"_j = v) h_j = b_v - z_v$ for every vertex $v$. + The $f$-flow makes every active vertex reachable from the chosen root, and the $h$-flow makes the root reachable from every active vertex on the same used support. + Finally impose the length bound + $sum_(j = 0)^(L - 1) ell_j (r_j(d) + g_j) <= B$. 
+ + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(j : "tail"_j = v) (r_j(d) + g_j) - sum_(j : "head"_j = v) (r_j(d) + g_j) = 0 quad forall v in V \ + & r_j(d) + g_j <= M_"use" y_j, y_j <= r_j(d) + g_j quad forall j in {0, dots, L - 1} \ + & y_j <= z_"tail"_j, y_j <= z_"head"_j quad forall j \ + & z_v <= sum_(j : "tail"_j = v " or " "head"_j = v) y_j quad forall v in V \ + & s = sum_v z_v; sum_v rho_v = 1; rho_v <= z_v quad forall v in V \ + & "the standard product linearization enforces" b_v = s rho_v quad forall v in V \ + & 0 <= f_j, h_j <= (n - 1) y_j quad forall j in {0, dots, L - 1} \ + & "forward and reverse root-flow conservation hold on the used support" \ + & sum_(j = 0)^(L - 1) ell_j (r_j(d) + g_j) <= B \ + & d_k, y_j, z_v, rho_v in {0, 1}; g_j in {0, dots, G}; f_j, h_j in {0, dots, n - 1}; s, b_v in ZZ_(>=0) + $. + + _Correctness._ ($arrow.r.double$) From any feasible mixed-postman tour, set $d_k$ from the direction in which edge $e_k$ is first required, let $g_j$ be the number of extra copies of $b_j$ beyond the required multiplicity, and let $y_j$ mark the positive-support arcs. The tour itself visits exactly the active vertices, so some active vertex can be chosen as the root. Taking one outgoing spanning arborescence and one incoming spanning arborescence of the used Eulerian digraph gives feasible $f$- and $h$-flows. The walk length is exactly $sum_j ell_j (r_j(d) + g_j)$, hence the ILP is feasible. + ($arrow.l.double$) A feasible ILP solution chooses one direction for every undirected edge, and the balance equations make the directed multigraph with multiplicities $r_j(d) + g_j$ Eulerian. The two root-flow systems imply that the positive-support digraph on the active vertices is strongly connected. Therefore the used multigraph admits an Euler tour, and its total length is exactly the bounded linear form above, so the source instance is a YES-instance. 
+ + _Solution extraction._ Return the orientation bits $d_e$ in the source edge order. +] + +#reduction-rule("RuralPostman", "ILP")[ + Use one traversal-multiplicity variable per edge, together with activation and connectivity constraints, to encode an Eulerian connected subgraph covering all required edges. +][ + _Construction._ If $E' = emptyset$, the empty circuit already satisfies the source instance whenever $B >= 0$, so use the empty ILP. Otherwise fix a root vertex $r$ incident to some required edge and let $n = |V|$. Variables: integer $t_e in {0, 1, 2}$ and parity variables $q_v$, binary edge-activation flags $y_e$, binary vertex-activity flags $z_v$, and nonnegative connectivity-flow variables $f_(u,v)$ on both orientations of every edge. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & y_e <= t_e <= 2 y_e quad forall e in E \ + & t_e >= 1 quad forall e in E' \ + & sum_(e : v in e) t_e = 2 q_v quad forall v \ + & y_e <= z_u, y_e <= z_v quad forall e = {u, v} in E \ + & z_v <= sum_(e : v in e) y_e quad forall v in V \ + & f_(u,v) <= (n - 1) y_e, f_(v,u) <= (n - 1) y_e quad forall e = {u, v} in E \ + & sum_(w : {r, w} in E) f_(r,w) - sum_(u : {u, r} in E) f_(u,r) = sum_v z_v - 1 \ + & sum_(u : {u, v} in E) f_(u,v) - sum_(w : {v, w} in E) f_(v,w) = z_v quad forall v in V backslash {r} \ + & sum_e ell_e t_e <= B \ + & y_e, z_v in {0, 1}, t_e in {0, 1, 2}, q_v, f_(u,v) in ZZ_(>=0) + $. + + _Correctness._ ($arrow.r.double$) Any feasible rural-postman circuit uses each edge at most twice, has even degrees, is connected on its positive-support edges, and satisfies the bound. ($arrow.l.double$) A feasible ILP solution defines a connected Eulerian multigraph containing every required edge, hence an Eulerian circuit of total length at most $B$. + + _Solution extraction._ Output the traversal multiplicities $(t_e)_(e in E)$. 
+] + +#reduction-rule("StackerCrane", "ILP")[ + Encode the required-arc order by a one-hot position assignment and charge the shortest connector distance between each consecutive pair of required arcs. +][ + _Construction._ Let the required arcs be $A = {a_0, dots, a_(m-1)}$ with $a_i = ("tail"_i, "head"_i)$. Build the mixed connector graph + $H = (V, A union {(u, v), (v, u) : {u, v} in E})$, + where the original required arcs keep their given lengths and each undirected edge contributes both orientations with the same length. Because all lengths are nonnegative, compute the all-pairs connector distances + $D[u, v] = "dist"_H(u, v)$ + either by running Dijkstra from every source vertex or by Floyd--Warshall on the $n$-vertex graph $H$; this is exactly the graph queried by `mixed_graph_adjacency()` and `shortest_path_length()` in the model. If $D[u, v] = oo$, the pair is impossible and will be forbidden explicitly. + + Use `ILP`. The binary position variables are $x_(i,p)$ for $i, p in {0, dots, m - 1}$, with index + $"idx"_x(i, p) = i m + p$. + The binary McCormick variables are $z_(i,j,p)$ for $i, j, p in {0, dots, m - 1}$, where position $p + 1$ is interpreted cyclically as $(p + 1) mod m$; their indices are + $"idx"_z(i, j, p) = m^2 + p m^2 + i m + j$. + There are $m^2 + m^3$ binary variables. + + The constraints are: + $sum_(p = 0)^(m - 1) x_(i,p) = 1$ for each required arc $i$; + $sum_(i = 0)^(m - 1) x_(i,p) = 1$ for each position $p$; + $z_(i,j,p) <= x_(i,p)$, $z_(i,j,p) <= x_(j,(p + 1) mod m)$, and $z_(i,j,p) >= x_(i,p) + x_(j,(p + 1) mod m) - 1$ for all $i, j, p$; + if $D["head"_i, "tail"_j] = oo$, then either set $z_(i,j,p) = 0$ for all $p$ or, equivalently, impose $x_(i,p) + x_(j,(p + 1) mod m) <= 1$ for all $p$; + and finally + $sum_(i = 0)^(m - 1) ell_i + sum_(p = 0)^(m - 1) sum_(i = 0)^(m - 1) sum_(j = 0)^(m - 1) D["head"_i, "tail"_j] z_(i,j,p) <= B$. 
+ The first term is the total length of the required traversals, and the second term charges exactly one connector distance for each consecutive pair in the cyclic order. + + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(p = 0)^(m - 1) x_(i,p) = 1 quad forall i in {0, dots, m - 1} \ + & sum_(i = 0)^(m - 1) x_(i,p) = 1 quad forall p in {0, dots, m - 1} \ + & z_(i,j,p) <= x_(i,p), z_(i,j,p) <= x_(j,(p + 1) mod m) quad forall i, j, p \ + & z_(i,j,p) >= x_(i,p) + x_(j,(p + 1) mod m) - 1 quad forall i, j, p \ + & z_(i,j,p) = 0 quad "whenever" D["head"_i, "tail"_j] = oo \ + & sum_(i = 0)^(m - 1) ell_i + sum_(p = 0)^(m - 1) sum_(i = 0)^(m - 1) sum_(j = 0)^(m - 1) D["head"_i, "tail"_j] z_(i,j,p) <= B \ + & x_(i,p), z_(i,j,p) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any feasible Stacker Crane permutation determines a one-hot assignment and consecutive-pair indicators whose connector costs equal the route length. ($arrow.l.double$) Any feasible ILP solution yields a permutation of the required arcs, and the linearized connector term is exactly the sum of shortest paths between consecutive arcs. + + _Solution extraction._ Decode the permutation by taking, for each position $p$, the unique arc $a$ with $x_(a,p) = 1$. +] + +#reduction-rule("SteinerTreeInGraphs", "ILP")[ + Select edges and certify terminal connectivity by sending one unit of flow from a root terminal to every other terminal through the selected subgraph. +][ + _Construction._ Fix a root terminal $r in R$. Variables: binary $y_(u,v)$ for each undirected edge $\{u,v\}$ and nonnegative flow variables $f^t_(u,v)$ on each directed edge orientation for every terminal $t in R backslash {r}$. 
The ILP is: + $ + min quad & sum_({u,v} in E) w_(u,v) y_(u,v) \ + "subject to" quad & sum_(u) f^t_(u,v) - sum_(w) f^t_(v,w) = b_(t,v) quad forall t in R backslash {r}, v in V \ + & f^t_(u,v) <= y_(u,v) quad forall {u, v} in E, t in R backslash {r} \ + & f^t_(v,u) <= y_(u,v) quad forall {u, v} in E, t in R backslash {r} \ + & y_(u,v) in {0, 1}, f^t_(u,v) in ZZ_(>=0) + $, + where $b_(t,v) = -1$ if $v = r$, $b_(t,v) = 1$ if $v = t$, and $b_(t,v) = 0$ otherwise. + + _Correctness._ ($arrow.r.double$) A Steiner tree supports a unit flow from the root to every other terminal using exactly its selected edges, with the same total weight. ($arrow.l.double$) Any feasible ILP solution selects a connected subgraph spanning all terminals, and with nonnegative edge weights an optimum solution is a minimum-weight Steiner tree. + + _Solution extraction._ Output the binary edge-selection vector $(y_e)_(e in E)$. +] + +// Scheduling + +#reduction-rule("FlowShopScheduling", "ILP")[ + Order the jobs with pairwise precedence bits and completion-time variables on every machine; the deadline becomes a makespan bound. +][ + _Construction._ Let $q in {1, dots, m}$ index the machines, let $p_(j,q) = ell(t_q [j])$ be the processing time of job $j$ on machine $q$, and let $M = D + max_(j, q) p_(j,q)$. Variables: binary $y_(i,j)$ with $y_(i,j) = 1$ iff job $i$ precedes job $j$, and integer completion times $C_(j,q)$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & y_(i,j) + y_(j,i) = 1 quad forall i != j \ + & C_(j,1) >= p_(j,1) quad forall j \ + & C_(j,q + 1) >= C_(j,q) + p_(j,q + 1) quad forall j, q in {1, dots, m - 1} \ + & C_(j,q) >= C_(i,q) + p_(j,q) - M (1 - y_(i,j)) quad forall i != j, q in {1, dots, m} \ + & C_(j,m) <= D quad forall j \ + & y_(i,j) in {0, 1}, C_(j,q) in ZZ_(>=0) + $. + + _Correctness._ ($arrow.r.double$) Any feasible flow-shop permutation induces a total order and completion times satisfying the machine and deadline constraints. 
($arrow.l.double$) Any feasible ILP solution defines one common order of the jobs on all machines, and the resulting schedule completes by the deadline. + + _Solution extraction._ Sort the jobs by their final-machine completion times $C_(j,m)$ and convert that permutation to Lehmer code. +] + +#reduction-rule("MinimumTardinessSequencing", "ILP")[ + A position-assignment ILP captures the permutation, the precedence constraints, and a binary tardy indicator for each unit-length task. +][ + _Construction._ Variables: binary $x_(j,p)$ placing task $j$ in position $p in {0, dots, n-1}$ and binary tardy indicators $u_j$, where $M = n$. The ILP is: + $ + min quad & sum_j u_j \ + "subject to" quad & sum_p x_(j,p) = 1 quad forall j \ + & sum_j x_(j,p) = 1 quad forall p \ + & sum_p p x_(i,p) + 1 <= sum_p p x_(j,p) quad "for each precedence" (i, j) \ + & sum_p (p + 1) x_(j,p) - d_j <= M u_j quad forall j \ + & x_(j,p), u_j in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any feasible schedule gives a permutation and tardy bits with objective equal to the number of tardy tasks. ($arrow.l.double$) Any feasible ILP assignment decodes to a precedence-respecting permutation, and each $u_j$ is forced to record whether task $j$ misses its deadline. + + _Solution extraction._ Decode the permutation from $x_(j,p)$ and encode it as Lehmer code. +] + +#reduction-rule("ResourceConstrainedScheduling", "ILP")[ + The source witness is already a time-slot assignment, so a standard time-indexed ILP suffices. +][ + _Construction._ Variables: binary $x_(j,t)$ with $x_(j,t) = 1$ iff task $j$ is run in slot $t in {0, dots, D - 1}$, where $r_(j,q) = R_q(t_j)$ denotes the amount of resource $q$ consumed by task $j$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_t x_(j,t) = 1 quad forall j \ + & sum_j x_(j,t) <= m quad forall t \ + & sum_j r_(j,q) x_(j,t) <= B_q quad forall q, t \ + & x_(j,t) in {0, 1} + $. 
+ + _Correctness._ ($arrow.r.double$) Any feasible schedule chooses one slot per task while respecting processor and resource capacities in every period. ($arrow.l.double$) Any feasible ILP solution directly gives such a slot assignment. + + _Solution extraction._ Task $j$ is assigned to the unique slot $t$ with $x_(j,t) = 1$. +] + +#reduction-rule("SequencingToMinimizeMaximumCumulativeCost", "ILP")[ + Assign each task to one position in the permutation and bound the running cumulative cost at every prefix. +][ + _Construction._ Variables: binary $x_(j,p)$ with $x_(j,p) = 1$ iff task $j$ is scheduled in position $p$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_p x_(j,p) = 1 quad forall j \ + & sum_j x_(j,p) = 1 quad forall p \ + & sum_p p x_(i,p) + 1 <= sum_p p x_(j,p) quad "for each precedence" (i, j) \ + & sum_j sum_(p in {0, dots, q}) c_j x_(j,p) <= K quad forall q \ + & x_(j,p) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) A feasible permutation satisfies the precedence constraints and keeps every prefix sum at most $K$. ($arrow.l.double$) Any feasible ILP assignment is a permutation whose cumulative cost after each prefix is exactly the linear expression being bounded. + + _Solution extraction._ Decode the position assignment and convert the resulting permutation to Lehmer code. +] + +#reduction-rule("SequencingToMinimizeWeightedTardiness", "ILP")[ + Encode the single-machine order with pairwise precedence bits and completion times, then linearize the weighted tardiness bound with nonnegative tardiness variables. +][ + _Construction._ Variables: binary $y_(i,j)$ with $y_(i,j) = 1$ iff job $i$ precedes job $j$, integer completion times $C_j$, and nonnegative tardiness variables $T_j$, where $M = sum_j ell_j$ is a valid schedule-horizon bound. 
The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & y_(i,j) + y_(j,i) = 1 quad forall i != j \ + & C_j >= ell_j quad forall j \ + & C_j >= C_i + ell_j - M (1 - y_(i,j)) quad forall i != j \ + & T_j >= C_j - d_j quad forall j \ + & T_j >= 0 quad forall j \ + & sum_j w_j T_j <= K \ + & y_(i,j) in {0, 1}, C_j in ZZ_(>=0), T_j in ZZ_(>=0) + $. + + _Correctness._ ($arrow.r.double$) Any job order induces completion times and tardiness values satisfying the bound exactly when the source instance is feasible. ($arrow.l.double$) Any feasible ILP solution yields a single-machine order whose weighted tardiness equals the encoded linear objective term. + + _Solution extraction._ Sort the jobs by $C_j$ and encode that permutation as Lehmer code. +] + +#reduction-rule("SequencingWithReleaseTimesAndDeadlines", "ILP")[ + A time-indexed formulation captures the admissible start window of each task and forbids overlap on the single machine. +][ + _Construction._ Variables: binary $x_(j,t)$ with $x_(j,t) = 1$ iff task $j$ starts at time $t$, where $p_j = ell(t_j)$ is the processing time (length) of task $j$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(t = r_j)^(d_j - p_j) x_(j,t) = 1 quad forall j \ + & sum_(j, t : t <= tau < t + p_j) x_(j,t) <= 1 quad forall tau \ + & x_(j,t) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any feasible non-preemptive schedule chooses one valid start time per task and never overlaps two active jobs. ($arrow.l.double$) Any feasible ILP solution gives exactly such a start-time assignment, so executing the jobs in increasing start order solves the source instance. + + _Solution extraction._ Read each task's chosen start time, sort the tasks by that order, and encode the resulting permutation as Lehmer code. +] + +#reduction-rule("TimetableDesign", "ILP")[ + The source witness is a binary craftsman-task-period incidence table, and all feasibility conditions are already linear. 
+][
+ _Construction._ Variables: binary $x_(c,t,h)$ with $x_(c,t,h) = 1$ iff craftsman $c$ works on task $t$ in period $h$. The ILP is:
+ $
+ "find" quad & bold(x) \
+ "subject to" quad & x_(c,t,h) = 0 quad "whenever either side is unavailable" \
+ & sum_t x_(c,t,h) <= 1 quad forall c, h \
+ & sum_c x_(c,t,h) <= 1 quad forall t, h \
+ & sum_h x_(c,t,h) = r_(c,t) quad forall c, t \
+ & x_(c,t,h) in {0, 1}
+ $.
+
+ _Correctness._ ($arrow.r.double$) Any valid timetable satisfies availability, exclusivity, and exact requirement counts. ($arrow.l.double$) Any feasible ILP solution is exactly such a timetable because the variable layout matches the source configuration.
+
+ _Solution extraction._ Output the flattened binary array $(x_(c,t,h))$ in source order.
+]
+
+// Position/Assignment
+
+#reduction-rule("HamiltonianPath", "ILP")[
+ Place each vertex in exactly one path position and use auxiliary variables for consecutive pairs so only graph edges may appear between adjacent positions.
+][
+ _Construction._ Let $n = |V|$. Variables: binary $x_(v,p)$ with $x_(v,p) = 1$ iff vertex $v$ is placed at position $p$, and binary $z_((u,v),p)$ for $p in {0, dots, n - 2}$ linearizing $x_(u,p) x_(v,p+1)$; the last position $n - 1$ has no successor, so it carries no consecutive-pair variables. The ILP is:
+ $
+ "find" quad & bold(x) \
+ "subject to" quad & sum_p x_(v,p) = 1 quad forall v \
+ & sum_v x_(v,p) = 1 quad forall p \
+ & z_((u,v),p) <= x_(u,p) quad forall (u, v), p in {0, dots, n - 2} \
+ & z_((u,v),p) <= x_(v,p+1) quad forall (u, v), p in {0, dots, n - 2} \
+ & z_((u,v),p) >= x_(u,p) + x_(v,p+1) - 1 quad forall (u, v), p in {0, dots, n - 2} \
+ & sum_((u,v) in E) z_((u,v),p) = 1 quad forall p in {0, dots, n - 2} \
+ & x_(v,p), z_((u,v),p) in {0, 1}
+ $.
+
+ _Correctness._ ($arrow.r.double$) A Hamiltonian path defines a permutation of the vertices and therefore a feasible assignment matrix with one admissible graph edge between every consecutive pair. ($arrow.l.double$) Any feasible ILP solution is a vertex permutation whose consecutive pairs are graph edges, hence a Hamiltonian path.
+
+ _Solution extraction._ For each position $p$, output the unique vertex $v$ with $x_(v,p) = 1$.
+]
+
+#reduction-rule("BottleneckTravelingSalesman", "ILP")[
+ Use a cyclic position assignment for the tour and a bottleneck variable that dominates the weight of every chosen tour edge.
+][
+ _Construction._ Variables: binary $x_(v,p)$ for city-position assignment, binary $z_((u,v),p)$ for consecutive tour edges, and integer bottleneck variable $b$. The ILP is:
+ $
+ min quad & b \
+ "subject to" quad & sum_p x_(v,p) = 1 quad forall v \
+ & sum_v x_(v,p) = 1 quad forall p \
+ & z_((u,v),p) <= x_(u,p) quad forall (u, v), p \
+ & z_((u,v),p) <= x_(v,(p+1) mod n) quad forall (u, v), p \
+ & z_((u,v),p) >= x_(u,p) + x_(v,(p+1) mod n) - 1 quad forall (u, v), p \
+ & sum_((u,v) in E) z_((u,v),p) = 1 quad forall p \
+ & b >= w_(u,v) z_((u,v),p) quad forall (u, v), p \
+ & x_(v,p), z_((u,v),p) in {0, 1}, b in ZZ_(>=0)
+ $.
+
+ _Correctness._ ($arrow.r.double$) Any Hamiltonian tour yields a feasible assignment and sets $b$ to the maximum selected edge weight. ($arrow.l.double$) Any feasible ILP solution encodes a Hamiltonian cycle, and the minimax constraints force $b$ to equal its bottleneck edge weight.
+
+ _Solution extraction._ Mark an edge selected in the source config iff it appears between two consecutive positions in the decoded cycle.
+]
+
+#reduction-rule("LongestCircuit", "ILP")[
+ A direct cycle-selection ILP uses binary edge variables, degree constraints, and a connectivity witness to force exactly one simple circuit of length at least the bound.
+][
+ _Construction._ Variables: binary $y_e$ for edges, binary $s_v$ indicating whether vertex $v$ lies on the circuit, and root-flow variables on selected edges. The ILP is:
+ $
+ "find" quad & bold(x) \
+ "subject to" quad & sum_(e : v in e) y_e = 2 s_v quad forall v \
+ & sum_e y_e >= 3 \
+ & sum_e ell_e y_e >= K \
+ & "root-flow connectivity constraints hold on the selected edges" \
+ & y_e, s_v in {0, 1}
+ $.
+ + _Correctness._ ($arrow.r.double$) A simple circuit has degree 2 at each used vertex, is connected, and meets the length bound $K$. ($arrow.l.double$) The degree and connectivity constraints force the selected edges to form exactly one simple circuit, and the final inequality enforces the required total length. + + _Solution extraction._ Output the binary edge-selection vector $(y_e)_(e in E)$. +] + +#reduction-rule("QuadraticAssignment", "ILP")[ + Assign each facility to exactly one location, enforce injectivity, and linearize every quadratic cost term with McCormick products. +][ + _Construction._ Variables: binary $x_(i,p)$ with $x_(i,p) = 1$ iff facility $i$ is placed at location $p$, and binary $z_((i,p),(j,q))$ for the products $x_(i,p) x_(j,q)$. The ILP is: + $ + min quad & sum_(i != j) sum_(p,q) c_(i,j) d_(p,q) z_((i,p),(j,q)) \ + "subject to" quad & sum_p x_(i,p) = 1 quad forall i \ + & sum_i x_(i,p) <= 1 quad forall p \ + & z_((i,p),(j,q)) <= x_(i,p) quad forall i, p, j, q \ + & z_((i,p),(j,q)) <= x_(j,q) quad forall i, p, j, q \ + & z_((i,p),(j,q)) >= x_(i,p) + x_(j,q) - 1 quad forall i, p, j, q \ + & x_(i,p), z_((i,p),(j,q)) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any injective facility placement gives a feasible ILP assignment with exactly the same quadratic cost. ($arrow.l.double$) Any feasible ILP solution decodes to an injective facility-to-location map, and the linearized objective equals the source objective term by term. + + _Solution extraction._ For each facility $i$, output the unique location $p$ with $x_(i,p) = 1$. +] + +#reduction-rule("OptimalLinearArrangement", "ILP")[ + Assign each vertex to one position and use absolute-value auxiliaries to measure the length of every edge in the arrangement. +][ + _Construction._ Variables: binary $x_(v,p)$ with $x_(v,p) = 1$ iff vertex $v$ gets position $p$, integer position variables $p_v = sum_p p x_(v,p)$, and nonnegative $z_(u,v)$ per edge $\{u,v\}$. 
The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_p x_(v,p) = 1 quad forall v \ + & sum_v x_(v,p) = 1 quad forall p \ + & z_(u,v) >= p_u - p_v quad forall {u, v} in E \ + & z_(u,v) >= p_v - p_u quad forall {u, v} in E \ + & sum_({u,v} in E) z_(u,v) <= K \ + & x_(v,p) in {0, 1}, z_(u,v) in ZZ_(>=0) + $. + + _Correctness._ ($arrow.r.double$) Any valid linear arrangement satisfies the permutation constraints and gives edge lengths $|p_u - p_v|$ within the bound. ($arrow.l.double$) Any feasible ILP solution is a bijection from vertices to positions, and the auxiliary variables exactly upper-bound the edge lengths, so the total arrangement cost is at most $K$. + + _Solution extraction._ For each vertex $v$, output its decoded position $p_v$. +] + +#reduction-rule("SubgraphIsomorphism", "ILP")[ + Choose an injective image of every pattern vertex in the host graph and forbid any mapped pattern edge from landing on a host non-edge. +][ + _Construction._ Variables: binary $x_(v,u)$ with $x_(v,u) = 1$ iff pattern vertex $v$ maps to host vertex $u$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_u x_(v,u) = 1 quad forall v \ + & sum_v x_(v,u) <= 1 quad forall u \ + & x_(v,u) + x_(w,u') <= 1 quad forall {v, w} in E_"pat", {u, u'} in.not E_"host" \ + & x_(v,u') + x_(w,u) <= 1 quad forall {v, w} in E_"pat", {u, u'} in.not E_"host" \ + & x_(v,u) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any injective edge-preserving embedding satisfies the assignment and non-edge constraints. ($arrow.l.double$) Any feasible ILP solution is an injective vertex map, and the non-edge inequalities ensure every pattern edge is sent to a host edge. + + _Solution extraction._ For each pattern vertex $v$, output the unique host vertex $u$ with $x_(v,u) = 1$. 
+] + +// Graph structure + +#reduction-rule("AcyclicPartition", "ILP")[ + Assign every vertex to one partition class, bound the weight and crossing cost of those classes, and impose a topological order on the quotient digraph. +][ + _Construction._ Let $n = |V|$ and let the directed arcs be $A = {a_0, dots, a_(m-1)}$ with $a_t = (u_t -> v_t)$. The source witness already allows every vertex to choose one label in ${0, dots, n - 1}$, so the ILP uses exactly the same label range. Use `ILP` with variable order + $(x_(v,c))_(v,c), (s_(t,c))_(t,c), (y_t)_t, (o_c)_c, (p_v)_v$. + The indices are + $"idx"_x(v,c) = v n + c$, + $"idx"_s(t,c) = n^2 + t n + c$, + $"idx"_y(t) = n^2 + m n + t$, + $"idx"_o(c) = n^2 + m n + m + c$, + and $ "idx"_p(v) = n^2 + m n + m + n + v$. + There are $n^2 + m n + m + 2 n$ variables. + + Here $x_(v,c) in {0, 1}$ means vertex $v$ is assigned to class $c$, $s_(t,c) in {0, 1}$ means both endpoints of arc $a_t$ lie in class $c$, $y_t in {0, 1}$ marks that arc $a_t$ crosses between two different classes, $o_c in {0, dots, n - 1}$ is the order assigned to class $c$, and $p_v in {0, dots, n - 1}$ copies the order of the class chosen by vertex $v$. + + The constraints are: + $sum_(c = 0)^(n - 1) x_(v,c) = 1$ for every vertex $v$; + $sum_v w_v x_(v,c) <= B$ for every class $c$; + $s_(t,c) <= x_(u_t,c)$, $s_(t,c) <= x_(v_t,c)$, and $s_(t,c) >= x_(u_t,c) + x_(v_t,c) - 1$ for every arc $a_t$ and class $c$; + $y_t + sum_(c = 0)^(n - 1) s_(t,c) = 1$ for every arc $a_t$, so $y_t = 1$ exactly for crossing arcs; + $sum_(t = 0)^(m - 1) "cost"(a_t) y_t <= K$; + $0 <= o_c <= n - 1$ and $0 <= p_v <= n - 1$ for all classes $c$ and vertices $v$; + $p_v - o_c <= (n - 1) (1 - x_(v,c))$ and $o_c - p_v <= (n - 1) (1 - x_(v,c))$ for all $v, c$, so $p_v = o_c$ whenever $x_(v,c) = 1$; + and for every arc $a_t = (u_t -> v_t)$, + $p_(v_t) - p_(u_t) >= 1 - n sum_(c = 0)^(n - 1) s_(t,c)$. 
+ The exact big-$M$ here is $M = n$: if $u_t$ and $v_t$ lie in the same class, then $sum_c s_(t,c) = 1$ and the right-hand side is $1 - n = -(n - 1)$, which is precisely the smallest possible difference between two order variables in ${0, dots, n - 1}$. If the arc crosses between two distinct classes, then $sum_c s_(t,c) = 0$ and the inequality becomes $p_(v_t) - p_(u_t) >= 1$. For the realized classes $c$ and $d$ of the endpoints, this is exactly the requested form + $o_d - o_c >= 1 - M sum_h s_(t,h)$. + + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(c = 0)^(n - 1) x_(v,c) = 1 quad forall v in V \ + & sum_v w_v x_(v,c) <= B quad forall c in {0, dots, n - 1} \ + & s_(t,c) <= x_(u_t,c), s_(t,c) <= x_(v_t,c) quad forall t, c \ + & s_(t,c) >= x_(u_t,c) + x_(v_t,c) - 1 quad forall t, c \ + & y_t + sum_(c = 0)^(n - 1) s_(t,c) = 1 quad forall t in {0, dots, m - 1} \ + & sum_(t = 0)^(m - 1) "cost"(a_t) y_t <= K \ + & p_v - o_c <= (n - 1) (1 - x_(v,c)), o_c - p_v <= (n - 1) (1 - x_(v,c)) quad forall v, c \ + & p_(v_t) - p_(u_t) >= 1 - n sum_(c = 0)^(n - 1) s_(t,c) quad forall t in {0, dots, m - 1} \ + & x_(v,c), s_(t,c), y_t in {0, 1}; o_c, p_v in {0, dots, n - 1} + $. + + _Correctness._ ($arrow.r.double$) Any valid acyclic partition gives a class assignment whose quotient arcs respect some topological ordering, with the same class weights and crossing cost. ($arrow.l.double$) Any feasible ILP solution partitions the vertices, keeps every class within the weight bound, charges exactly the inter-class arcs, and the order variables force the quotient digraph to be acyclic. + + _Solution extraction._ For each vertex $v$, output the unique class label $c$ with $x_(v,c) = 1$. +] + +#reduction-rule("BalancedCompleteBipartiteSubgraph", "ILP")[ + Choose exactly $k$ vertices on each side of the bipartite graph and forbid any selected left-right pair that is not an edge. +][ + _Construction._ Let $L$ and $R$ be the bipartition. 
Variables: binary $x_l$ for $l in L$ and $y_r$ for $r in R$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(l in L) x_l = k \ + & sum_(r in R) y_r = k \ + & x_l + y_r <= 1 quad forall (l, r) in.not E \ + & x_l, y_r in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) A balanced complete bipartite subgraph of size $k + k$ satisfies the cardinality constraints and has no selected non-edge pair. ($arrow.l.double$) Any feasible ILP solution selects $k$ left vertices and $k$ right vertices with every cross-pair present, hence a balanced biclique. + + _Solution extraction._ Output the concatenated left/right binary selection vector. +] + +#reduction-rule("BicliqueCover", "ILP")[ + Use $k$ candidate bicliques, assign vertices to any of them, force every graph edge to be covered by some common biclique, and minimize the total membership size. +][ + _Construction._ Variables: binary $x_(l,b)$ for left vertices, binary $y_(r,b)$ for right vertices, and binary $z_((l,r),b)$ linearizing $x_(l,b) y_(r,b)$. The ILP is: + $ + min quad & sum_(l,b) x_(l,b) + sum_(r,b) y_(r,b) \ + "subject to" quad & z_((l,r),b) <= x_(l,b) quad forall l, r, b \ + & z_((l,r),b) <= y_(r,b) quad forall l, r, b \ + & z_((l,r),b) >= x_(l,b) + y_(r,b) - 1 quad forall l, r, b \ + & sum_b z_((l,r),b) >= 1 quad forall (l, r) in E \ + & x_(l,b) + y_(r,b) <= 1 quad forall (l, r) in.not E, b \ + & x_(l,b), y_(r,b), z_((l,r),b) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any valid $k$-biclique cover assigns each covered edge to a biclique containing both endpoints, with objective equal to the total biclique size. ($arrow.l.double$) Any feasible ILP solution defines $k$ complete bipartite subgraphs whose union covers every edge, and the objective is exactly the source objective. + + _Solution extraction._ Output the flattened vertex-by-biclique membership bits and discard the coverage auxiliaries. 
+] + +#reduction-rule("BiconnectivityAugmentation", "ILP")[ + Select candidate edges under the budget and, for every deleted vertex, certify that the remaining augmented graph stays connected by a flow witness. +][ + _Construction._ Let the base graph edges be $E = {e_0, dots, e_(m-1)}$ with $e_i = {u_i, v_i}$, and let the candidate edges be $F = {f_0, dots, f_(p-1)}$ with $f_j = {s_j, t_j}$. If $n = |V| <= 1$, return the empty feasible ILP, since every 0- or 1-vertex graph is already biconnected in the model. Otherwise fix, for each deleted vertex $q$, the surviving root + $r_q = 0$ if $q != 0$, and $r_0 = 1$. + This choice is explicit and valid because $n >= 2$. + + Use `ILP`. The candidate-selection bits are $y_j in {0, 1}$ with index $j$. For the connectivity witnesses, allocate the full $(q, t)$ commodity grid with $q, t in {0, dots, n - 1}$, even though the commodities with $t = q$ or $t = r_q$ will be pinned to 0. For each base edge $e_i$ and orientation flag $eta in {0, 1}$, let $eta = 0$ mean $u_i -> v_i$ and $eta = 1$ mean $v_i -> u_i$; define binary flow variables $f^(q,t)_(i,eta)$ with index + $p + (((q n + t) m + i) 2 + eta)$. + For each candidate edge $f_j$ and orientation flag $eta in {0, 1}$, let $eta = 0$ mean $s_j -> t_j$ and $eta = 1$ mean $t_j -> s_j$; define binary flow variables $g^(q,t)_(j,eta)$ with index + $p + 2 m n^2 + (((q n + t) p + j) 2 + eta)$. + There are $p + 2 n^2 (m + p)$ variables in total. 
+ + The constraints are: + $sum_(j = 0)^(p - 1) w_j y_j <= B$; + for every deleted vertex $q$ and target $t$, if $t = q$ or $t = r_q$, set all $f^(q,t)_(i,eta)$ and $g^(q,t)_(j,eta)$ equal to 0; + if the deleted vertex $q$ is incident to base edge $e_i$ or candidate edge $f_j$, set the corresponding directed flow variables for that $(q,t)$ to 0, because they do not exist in $G - q$; + for each candidate edge variable, the exact activation big-$M$ is 1: + $g^(q,t)_(j,eta) <= y_j$ for every $q, t, j, eta$; + and for every valid pair $(q,t)$ with $t in.not {q, r_q}$ and every surviving vertex $v != q$, impose flow conservation + $sum_"out of v" (f^(q,t) + g^(q,t)) - sum_"into v" (f^(q,t) + g^(q,t)) = 1$ + when $v = r_q$, + $= -1$ when $v = t$, + and $= 0$ otherwise. + The sums range over both orientations of all base and candidate edges that avoid $q$. Since every commodity carries exactly one unit, binary flows are sufficient. + + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(j = 0)^(p - 1) w_j y_j <= B \ + & f^(q,t)_(i,eta) = 0 quad "whenever" t in {q, r_q} "or" e_i "is incident to" q \ + & g^(q,t)_(j,eta) = 0 quad "whenever" t in {q, r_q} "or" f_j "is incident to" q \ + & g^(q,t)_(j,eta) <= y_j quad forall q, t in {0, dots, n - 1}, j in {0, dots, p - 1}, eta in {0, 1} \ + & "for each valid pair" (q,t) ", unit-flow conservation from" r_q "to" t "holds in" G - q \ + & y_j, f^(q,t)_(i,eta), g^(q,t)_(j,eta) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) If the chosen augmentation makes the graph biconnected, then every vertex-deleted graph is connected and therefore supports the required flows. ($arrow.l.double$) If the ILP is feasible, then removing any single vertex leaves a connected graph, which is exactly the definition of biconnectivity for the augmented graph. + + _Solution extraction._ Output the binary selection vector of candidate edges. 
+] + +#reduction-rule("BoundedComponentSpanningForest", "ILP")[ + Assign every vertex to one of at most $K$ components, bound each component's total weight, and certify connectivity inside each used component by a flow witness. +][ + _Construction._ Let $n = |V|$, let the graph edges be $E = {e_0, dots, e_(m-1)}$ with $e_i = {u_i, v_i}$, and let the allowed component labels be $c in {0, dots, K - 1}$. Use `ILP` with variables ordered as + $(x_(v,c))_(v,c), (u_c)_c, (r_(v,c))_(v,c), (s_c)_c, (b_(v,c))_(v,c), (f_(i,eta,c))_(i,eta,c)$. + Their indices are + $"idx"_x(v,c) = v K + c$, + $"idx"_u(c) = n K + c$, + $"idx"_r(v,c) = n K + K + v K + c$, + $"idx"_s(c) = 2 n K + K + c$, + $"idx"_b(v,c) = 2 n K + 2 K + v K + c$, + and, with $eta = 0$ meaning $u_i -> v_i$ and $eta = 1$ meaning $v_i -> u_i$, + $"idx"_f(i, eta, c) = 3 n K + 2 K + (i 2 + eta) K + c$. + There are $3 n K + 2 K + 2 m K$ variables. + + Here $x_(v,c) in {0, 1}$ means vertex $v$ is placed in component $c$, $u_c in {0, 1}$ says that component $c$ is nonempty, $r_(v,c) in {0, 1}$ chooses the root of nonempty component $c$, $s_c in {0, dots, n}$ is its size, $b_(v,c) in {0, dots, n}$ linearizes the product $s_c r_(v,c)$, and $f_(i,eta,c) in {0, dots, n - 1}$ is the root-flow on the chosen component edges. 
+ + The constraints are: + $sum_(c = 0)^(K - 1) x_(v,c) = 1$ for every vertex $v$; + $sum_v w_v x_(v,c) <= B$ for every component label $c$; + $s_c = sum_v x_(v,c)$ for every $c$; + $u_c <= s_c$ and $s_c <= n u_c$ for every $c$, so $u_c = 1$ iff the component is nonempty; + $sum_v r_(v,c) = u_c$ and $r_(v,c) <= x_(v,c)$ for every $c$ and $v$, which chooses exactly one root in every nonempty component; + the product linearization $b_(v,c) <= s_c$, $b_(v,c) <= n r_(v,c)$, $b_(v,c) >= s_c - n (1 - r_(v,c))$, $b_(v,c) >= 0$ for every $v, c$, so $b_(v,c) = s_c r_(v,c)$; the exact big-$M$ here is $n$; + for every edge $e_i = {u_i, v_i}$, orientation flag $eta in {0, 1}$, and component $c$, + $0 <= f_(i,eta,c) <= (n - 1) x_(u_i,c)$ and $0 <= f_(i,eta,c) <= (n - 1) x_(v_i,c)$. + The exact capacity big-$M$ is $n - 1$: a component of size $s_c$ needs to route at most $s_c - 1 <= n - 1$ units across any oriented edge of a spanning tree. + Finally, for every vertex $v$ and component $c$, + $sum_"out of v in c" f - sum_"into v in c" f = b_(v,c) - x_(v,c)$. + If $v$ is the chosen root of component $c$, then the right-hand side is $s_c - 1$; every other assigned vertex consumes one unit; unassigned vertices have right-hand side 0. + + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(c = 0)^(K - 1) x_(v,c) = 1 quad forall v in V \ + & sum_v w_v x_(v,c) <= B quad forall c in {0, dots, K - 1} \ + & s_c = sum_v x_(v,c) quad forall c in {0, dots, K - 1} \ + & u_c <= s_c <= n u_c quad forall c in {0, dots, K - 1} \ + & sum_v r_(v,c) = u_c, r_(v,c) <= x_(v,c) quad forall v, c \ + & "the standard product linearization enforces" b_(v,c) = s_c r_(v,c) quad forall v, c \ + & 0 <= f_(i,eta,c) <= (n - 1) x_(u_i,c), 0 <= f_(i,eta,c) <= (n - 1) x_(v_i,c) quad forall i, eta, c \ + & sum_"out of v in c" f - sum_"into v in c" f = b_(v,c) - x_(v,c) quad forall v, c \ + & x_(v,c), u_c, r_(v,c) in {0, 1}; s_c in {0, dots, n}; b_(v,c), f_(i,eta,c) in ZZ_(>=0) + $. 
+ + _Correctness._ ($arrow.r.double$) Any valid bounded-component partition assigns each component a connected supporting subgraph and respects the weight bound. ($arrow.l.double$) Any feasible ILP solution partitions the vertices into at most $K$ connected sets, each of total weight at most $B$, exactly as required by the source problem. + + _Solution extraction._ For each vertex $v$, output the unique component label $c$ with $x_(v,c) = 1$. +] + +#reduction-rule("MinimumCutIntoBoundedSets", "ILP")[ + A binary side variable for each vertex, together with cut indicators on the edges, directly linearizes the bounded two-way cut conditions. +][ + _Construction._ Variables: binary $x_v$ with $x_v = 1$ iff $v$ is placed on the sink side, and binary $y_e$ for edges. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & x_s = 0 \ + & x_t = 1 \ + & sum_v x_v <= B \ + & sum_v (1 - x_v) <= B \ + & y_e >= x_u - x_v quad forall e = {u, v} in E \ + & y_e >= x_v - x_u quad forall e = {u, v} in E \ + & sum_e w_e y_e <= K \ + & x_v, y_e in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any feasible bounded cut determines a 0/1 side assignment, and the edge indicators are 1 exactly on the cut edges. ($arrow.l.double$) Any feasible ILP solution partitions the vertices into two bounded sets with $s$ and $t$ separated and total cut weight at most $K$. + + _Solution extraction._ Output the partition bit-vector $(x_v)_(v in V)$. +] + +#reduction-rule("StrongConnectivityAugmentation", "ILP")[ + Select candidate arcs under the budget and certify strong connectivity by sending flow both from a root to every vertex and back again. +][ + _Construction._ Let the base arcs be $A = {a_0, dots, a_(m-1)}$ with $a_i = (u_i, v_i)$, let the candidate arcs be $C = {c_0, dots, c_(p-1)}$ with $c_j = (s_j, t_j)$, and, when $n = |V| >= 1$, fix the root to be vertex $r = 0$. If $n <= 1$, return the empty feasible ILP. 
Use `ILP` with variables ordered as + $(y_j)_j, (f^t_i)_(t,i), (bar(f)^t_j)_(t,j), (g^t_i)_(t,i), (bar(g)^t_j)_(t,j)$, + where $f^t$ is the forward root-to-$t$ flow on base arcs, $bar(f)^t$ is the forward flow on candidate arcs, $g^t$ is the backward $t$-to-root flow on base arcs, and $bar(g)^t$ is the backward flow on candidate arcs. + The indices are + $"idx"_y(j) = j$, + $"idx"_(f^t_i) = p + t m + i$, + $"idx"_(bar(f)^t_j) = p + n m + t p + j$, + $"idx"_(g^t_i) = p + n (m + p) + t m + i$, + and $ "idx"_(bar(g)^t_j) = p + n (2 m + p) + t p + j$. + There are $p + 2 n (m + p)$ variables. + + The constraints are: + $sum_(j = 0)^(p - 1) w_j y_j <= B$; + for the dummy commodity $t = r$, set all four flow blocks $f^r_i$, $bar(f)^r_j$, $g^r_i$, $bar(g)^r_j$ to 0; + for every candidate arc and target vertex, use the exact activation big-$M = 1$: + $bar(f)^t_j <= y_j$ and $bar(g)^t_j <= y_j$ for all $t, j$; + for every $t != r$ and every vertex $v$, + $sum_"out of v" (f^t + bar(f)^t) - sum_"into v" (f^t + bar(f)^t) = 1$ + when $v = r$, + $= -1$ when $v = t$, + and $= 0$ otherwise; + and + $sum_"out of v" (g^t + bar(g)^t) - sum_"into v" (g^t + bar(g)^t) = 1$ + when $v = t$, + $= -1$ when $v = r$, + and $= 0$ otherwise. + All flow variables are binary, because each commodity carries a single unit. + + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(j = 0)^(p - 1) w_j y_j <= B \ + & f^r_i = 0, bar(f)^r_j = 0, g^r_i = 0, bar(g)^r_j = 0 \ + & bar(f)^t_j <= y_j, bar(g)^t_j <= y_j quad forall t in {0, dots, n - 1}, j in {0, dots, p - 1} \ + & "root-to-target unit-flow conservation holds on" f^t, bar(f)^t quad forall t != r \ + & "target-to-root unit-flow conservation holds on" g^t, bar(g)^t quad forall t != r \ + & y_j, f^t_i, bar(f)^t_j, g^t_i, bar(g)^t_j in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) A strongly connected augmentation provides both directions of reachability between the root and every other vertex, hence all required flows. 
($arrow.l.double$) If those flows exist for every vertex, then every vertex is reachable from the root and can reach the root, so the augmented digraph is strongly connected. + + _Solution extraction._ Output the binary candidate-arc selection vector $(y_a)$. +] + +// Matrix/encoding + +#reduction-rule("BMF", "ILP")[ + Split the witness into binary factor matrices $B$ and $C$, reconstruct their Boolean product with McCormick auxiliaries, and minimize the Hamming distance to the target matrix. +][ + _Construction._ Variables: binary $b_(i,r)$, binary $c_(r,j)$, binary $p_(i,r,j)$ linearizing $b_(i,r) c_(r,j)$, binary $w_(i,j)$ for the reconstructed entry, and nonnegative error variables $e_(i,j)$. The ILP is: + $ + min quad & sum_(i,j) e_(i,j) \ + "subject to" quad & p_(i,r,j) <= b_(i,r) quad forall i, r, j \ + & p_(i,r,j) <= c_(r,j) quad forall i, r, j \ + & p_(i,r,j) >= b_(i,r) + c_(r,j) - 1 quad forall i, r, j \ + & w_(i,j) >= p_(i,r,j) quad forall i, r, j \ + & w_(i,j) <= sum_r p_(i,r,j) quad forall i, j \ + & e_(i,j) >= A_(i,j) - w_(i,j) quad forall i, j \ + & e_(i,j) >= w_(i,j) - A_(i,j) quad forall i, j \ + & b_(i,r), c_(r,j), p_(i,r,j), w_(i,j) in {0, 1}, e_(i,j) in ZZ_(>=0) + $. + + _Correctness._ ($arrow.r.double$) Any choice of factor matrices induces the same Boolean product and Hamming error in the ILP. ($arrow.l.double$) Any feasible ILP assignment determines factor matrices $B$ and $C$, and the linearization forces the objective to equal the Hamming distance between $A$ and $B dot C$. + + _Solution extraction._ Output the flattened bits of $B$ followed by the flattened bits of $C$, discarding the reconstruction auxiliaries. +] + +#reduction-rule("ConsecutiveBlockMinimization", "ILP")[ + Permute the columns with a one-hot assignment and count row-wise block starts by detecting each 0-to-1 transition after permutation. 
+][ + _Construction._ Variables: binary $x_(c,p)$ with $x_(c,p) = 1$ iff column $c$ goes to position $p$, binary $a_(r,p)$ for the value seen by row $r$ at position $p$, and binary block-start indicators $b_(r,p)$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_p x_(c,p) = 1 quad forall c \ + & sum_c x_(c,p) = 1 quad forall p \ + & a_(r,p) = sum_c A_(r,c) x_(c,p) quad forall r, p \ + & b_(r,0) = a_(r,0) quad forall r \ + & b_(r,p) >= a_(r,p) - a_(r,p-1) quad forall r, p > 0 \ + & sum_(r,p) b_(r,p) <= K \ + & x_(c,p), a_(r,p), b_(r,p) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any column permutation determines exactly one block-start variable for each maximal run of 1s in every row. ($arrow.l.double$) A feasible ILP solution is a column permutation whose counted block starts sum to at most $K$, which is precisely the source criterion. + + _Solution extraction._ Decode the column permutation from $x_(c,p)$. +] + +#reduction-rule("ConsecutiveOnesMatrixAugmentation", "ILP")[ + Choose a column permutation and, for each row, choose the interval that will become its consecutive block of 1s; flips are needed only for zeros inside that interval. +][ + _Construction._ Let the matrix have $m$ rows and $n$ columns, and let $A_(r,c) in {0, 1}$ be the given entry. For each row define the constant + $beta_r = 1$ if row $r$ contains at least one 1, and $beta_r = 0$ otherwise. + Use `ILP` with variable order + $(x_(c,p))_(c,p), (a_(r,p))_(r,p), (ell_(r,p))_(r,p), (u_(r,p))_(r,p), (h_(r,p))_(r,p), (f_(r,p))_(r,p)$. + The indices are + $"idx"_x(c,p) = c n + p$, + $"idx"_a(r,p) = n^2 + r n + p$, + $"idx"_ell(r,p) = n^2 + m n + r n + p$, + $"idx"_u(r,p) = n^2 + 2 m n + r n + p$, + $"idx"_h(r,p) = n^2 + 3 m n + r n + p$, + and $ "idx"_f(r,p) = n^2 + 4 m n + r n + p$. + There are $n^2 + 5 m n$ binary variables. 
+ + Here $x_(c,p) = 1$ means original column $c$ is placed at position $p$ of the permutation, $a_(r,p)$ is the value seen in row $r$ at permuted position $p$, $ell_(r,p)$ and $u_(r,p)$ choose the left and right interval boundaries of row $r$, $h_(r,p)$ indicates that position $p$ lies inside that chosen interval, and $f_(r,p)$ indicates that row $r$ flips a 0 to a 1 at position $p$. + + The constraints are: + $sum_p x_(c,p) = 1$ for every column $c$; + $sum_c x_(c,p) = 1$ for every position $p$; + $a_(r,p) = sum_c A_(r,c) x_(c,p)$ for every row $r$ and position $p$; + $sum_p ell_(r,p) = beta_r$ and $sum_p u_(r,p) = beta_r$ for every row $r$; + $sum_p p ell_(r,p) <= sum_p p u_(r,p) + (n - 1) (1 - beta_r)$ for every row $r$, which forces the left boundary not to exceed the right boundary when the row is nonzero; + for every row $r$ and position $p$, + $h_(r,p) <= sum_(q = 0)^p ell_(r,q)$, + $h_(r,p) <= sum_(q = p)^(n - 1) u_(r,q)$, + and + $h_(r,p) >= sum_(q = 0)^p ell_(r,q) + sum_(q = p)^(n - 1) u_(r,q) - 1$; + $a_(r,p) <= h_(r,p)$ for every $r, p$, so every original 1 lies inside the chosen interval; + $h_(r,p) <= a_(r,p) + f_(r,p)$, $f_(r,p) <= h_(r,p)$, and $f_(r,p) + a_(r,p) <= 1$ for every $r, p$, so $f_(r,p) = 1$ exactly when the position lies inside the interval but the original matrix has a 0 there; + and the augmentation budget + $sum_(r = 0)^(m - 1) sum_(p = 0)^(n - 1) f_(r,p) <= K$. + These are the exact consecutive-ones constraints: after permutation, row $r$ is 1 exactly on the positions with $h_(r,p) = 1$, and the only modifications charged are the zero-to-one flips recorded by $f$. 
+ + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_p x_(c,p) = 1 quad forall c \ + & sum_c x_(c,p) = 1 quad forall p \ + & a_(r,p) = sum_c A_(r,c) x_(c,p) quad forall r, p \ + & sum_p ell_(r,p) = beta_r, sum_p u_(r,p) = beta_r quad forall r \ + & sum_p p ell_(r,p) <= sum_p p u_(r,p) + (n - 1) (1 - beta_r) quad forall r \ + & h_(r,p) <= sum_(q = 0)^p ell_(r,q), h_(r,p) <= sum_(q = p)^(n - 1) u_(r,q) quad forall r, p \ + & h_(r,p) >= sum_(q = 0)^p ell_(r,q) + sum_(q = p)^(n - 1) u_(r,q) - 1 quad forall r, p \ + & a_(r,p) <= h_(r,p); h_(r,p) <= a_(r,p) + f_(r,p); f_(r,p) <= h_(r,p); f_(r,p) + a_(r,p) <= 1 quad forall r, p \ + & sum_(r = 0)^(m - 1) sum_(p = 0)^(n - 1) f_(r,p) <= K \ + & x_(c,p), a_(r,p), ell_(r,p), u_(r,p), h_(r,p), f_(r,p) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) A feasible augmentation chooses a permutation and flips exactly the zeros lying inside each row's final consecutive-ones interval. ($arrow.l.double$) Any feasible ILP solution yields a permuted matrix whose rows become consecutive-ones after the encoded zero-to-one augmentations, with total augmentation cost at most $K$. + + _Solution extraction._ Decode the column permutation from $x_(c,p)$ and discard the auxiliary flip variables. +] + +#reduction-rule("ConsecutiveOnesSubmatrix", "ILP")[ + Select exactly $K$ columns, permute only those selected columns, and require every row to have a single consecutive block within the chosen submatrix. +][ + _Construction._ Variables: binary selection bits $s_c$, binary placement variables $x_(c,p)$ for selected columns, and row-interval auxiliaries as in ConsecutiveOnesMatrixAugmentation. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_c s_c = K \ + & sum_p x_(c,p) = s_c quad forall c \ + & sum_c x_(c,p) = 1 quad forall p in {1, dots, K} \ + & "the selected rows satisfy the consecutive-ones interval constraints" \ + & s_c, x_(c,p) in {0, 1} + $. 
+
+  _Correctness._ ($arrow.r.double$) Any feasible column subset with a valid permutation satisfies the selection and interval constraints. ($arrow.l.double$) Any feasible ILP solution chooses exactly $K$ columns whose induced submatrix admits a consecutive-ones permutation.
+
+  _Solution extraction._ Output the column-selection bits $(s_c)_(c = 0)^(n - 1)$ and ignore the permutation auxiliaries.
+]
+
+#reduction-rule("SparseMatrixCompression", "ILP")[
+  Assign each row one shift value and forbid any pair of shifted 1-entries from colliding in the storage vector.
+][
+  _Construction._ Variables: binary $x_(r,g)$ with $x_(r,g) = 1$ iff row $r$ uses shift $g in {0, dots, K - 1}$. The ILP is:
+  $
+  "find" quad & bold(x) \
+  "subject to" quad & sum_g x_(r,g) = 1 quad forall r \
+  & x_(r,g) + x_(s,h) <= 1 quad "whenever" r != s, A_(r,i) = A_(s,j) = 1, "and" i + g = j + h \
+  & x_(r,g) in {0, 1}
+  $.
+
+  _Correctness._ ($arrow.r.double$) A valid compression chooses one shift per row and never overlays 1-entries from different rows in the same storage position. ($arrow.l.double$) Any feasible ILP solution gives exactly such a collision-free shift assignment, hence a valid storage vector of length $n + K$.
+
+  _Solution extraction._ For each row $r$, output the unique zero-based shift $g$ with $x_(r,g) = 1$.
+]
+
+// Sequence/misc
+
+#reduction-rule("ShortestCommonSupersequence", "ILP")[
+  Fill the $B$ positions of the supersequence with one-hot symbol variables and match each input string monotonically into those positions.
+][
+  _Construction._ Variables: binary $x_(p,a)$ with $x_(p,a) = 1$ iff position $p$ carries symbol $a$, and binary matching variables $m_(s,j,p)$ saying that the $j$-th symbol of string $s$ is matched to position $p$.
The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_a x_(p,a) = 1 quad forall p \ + & sum_p m_(s,j,p) = 1 quad forall s, j \ + & m_(s,j,p) <= x_(p,a) quad forall s, j, p " with symbol " a \ + & "matching positions are strictly increasing in j for every string s" \ + & x_(p,a), m_(s,j,p) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any common supersequence of length at most $B$ induces a one-hot symbol assignment and a monotone match of every input string. ($arrow.l.double$) Any feasible ILP solution yields a length-$B$ string into which every source string embeds as a subsequence. + + _Solution extraction._ At each position $p$, output the unique symbol $a$ with $x_(p,a) = 1$. +] + +#reduction-rule("StringToStringCorrection", "ILP")[ + A time-expanded ILP chooses one edit operation at each of the $K$ stages and tracks the evolving string state until it matches the target. +][ + _Construction._ Let the source length be $n$, the target length be $m$, and the operation bound be $K$. If $m > n$ or $m < n - K$, return an infeasible empty ILP, because the model rejects such instances before any search. Otherwise use `ILP`. Track the evolving string by the identities of the original source positions, not only by their symbols: token $i in {0, dots, n - 1}$ carries symbol $x_i$ from the source string. + + The state variables are $z_(t,p,i)$ for $t in {0, dots, K}$, $p in {0, dots, n - 1}$, $i in {0, dots, n - 1}$, where $z_(t,p,i) = 1$ iff token $i$ occupies position $p$ after step $t$. Their indices are + $"idx"_z(t,p,i) = t n^2 + p n + i$. + The emptiness bits are $e_(t,p)$ with index + $"idx"_e(t,p) = (K + 1) n^2 + t n + p$. 
+ The operation variables are delete bits $d_(t,j)$ for $t in {1, dots, K}$ and $j in {0, dots, n - 1}$ with index + $"idx"_d(t,j) = (K + 1) (n^2 + n) + (t - 1) n + j$; + swap bits $s_(t,j)$ for $j in {0, dots, n - 2}$ with index + $"idx"_s(t,j) = (K + 1) (n^2 + n) + K n + (t - 1) (n - 1) + j$; + and no-op bits $nu_t$ with index + $"idx"_nu(t) = (K + 1) (n^2 + n) + K n + K (n - 1) + (t - 1)$. + There are $(K + 1) (n^2 + n) + K (2 n)$ variables. + + The state validity constraints are: + $e_(t,p) + sum_i z_(t,p,i) = 1$ for every $t, p$; + $sum_p z_(t,p,i) <= 1$ for every $t, i$; + and $e_(t,p) <= e_(t,p + 1)$ for every $t$ and $p < n - 1$, forcing the active string to occupy a prefix and the deleted positions to form a suffix. + The initial state is fixed by + $z_(0,p,p) = 1$ for every $p$, + $z_(0,p,i) = 0$ for every $i != p$, + and $e_(0,p) = 0$ for every position $p$. + + At each step $t$, choose exactly one operation: + $sum_(j = 0)^(n - 1) d_(t,j) + sum_(j = 0)^(n - 2) s_(t,j) + nu_t = 1$. + Delete at position $j$ is legal only when that current position exists, so + $d_(t,j) <= 1 - e_(t - 1,j)$. + Swap at position $j$ is legal only when both positions $j$ and $j + 1$ exist, so + $s_(t,j) <= 1 - e_(t - 1,j)$ and $s_(t,j) <= 1 - e_(t - 1,j + 1)$. + + The state-update equations are conditioned by exact big-$M$ bounds with $M = 1$, because every left-hand side and every referenced right-hand side is binary. 
For every token $i$, step $t$, and position $p$: + if no-op is chosen, impose + $z_(t,p,i) - z_(t - 1,p,i) <= 1 - nu_t$ and $z_(t - 1,p,i) - z_(t,p,i) <= 1 - nu_t$; + for every delete position $j$, if $p < j$ impose + $z_(t,p,i) - z_(t - 1,p,i) <= 1 - d_(t,j)$ and $z_(t - 1,p,i) - z_(t,p,i) <= 1 - d_(t,j)$; + if $j <= p < n - 1$, impose + $z_(t,p,i) - z_(t - 1,p + 1,i) <= 1 - d_(t,j)$ and $z_(t - 1,p + 1,i) - z_(t,p,i) <= 1 - d_(t,j)$; + and for the last position impose $z_(t,n - 1,i) <= 1 - d_(t,j)$; + for every swap position $j$, if $p in.not {j, j + 1}$ impose + $z_(t,p,i) - z_(t - 1,p,i) <= 1 - s_(t,j)$ and $z_(t - 1,p,i) - z_(t,p,i) <= 1 - s_(t,j)$; + if $p = j$, impose + $z_(t,j,i) - z_(t - 1,j + 1,i) <= 1 - s_(t,j)$ and $z_(t - 1,j + 1,i) - z_(t,j,i) <= 1 - s_(t,j)$; + and if $p = j + 1$, impose + $z_(t,j + 1,i) - z_(t - 1,j,i) <= 1 - s_(t,j)$ and $z_(t - 1,j,i) - z_(t,j + 1,i) <= 1 - s_(t,j)$. + + Finally force the step-$K$ state to equal the target string: + $sum_(i : x_i = y_p) z_(K,p,i) = 1$ for every target position $p in {0, dots, m - 1}$, + and $e_(K,p) = 1$ for every $p in {m, dots, n - 1}$. + This exactly matches the model semantics, which compare the final working string to the target after $K$ operations. 
+ + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & e_(t,p) + sum_i z_(t,p,i) = 1 quad forall t in {0, dots, K}, p in {0, dots, n - 1} \ + & sum_p z_(t,p,i) <= 1 quad forall t in {0, dots, K}, i in {0, dots, n - 1} \ + & e_(t,p) <= e_(t,p + 1) quad forall t in {0, dots, K}, p in {0, dots, n - 2} \ + & z_(0,p,p) = 1, z_(0,p,i) = 0 quad forall i != p, e_(0,p) = 0 quad forall p \ + & sum_(j = 0)^(n - 1) d_(t,j) + sum_(j = 0)^(n - 2) s_(t,j) + nu_t = 1 quad forall t in {1, dots, K} \ + & d_(t,j) <= 1 - e_(t - 1,j) quad forall t, j \ + & s_(t,j) <= 1 - e_(t - 1,j), s_(t,j) <= 1 - e_(t - 1,j + 1) quad forall t, j \ + & "the exact M = 1 state-update equations enforce no-op, delete, and adjacent-swap transitions" \ + & sum_(i : x_i = y_p) z_(K,p,i) = 1 quad forall p in {0, dots, m - 1} \ + & e_(K,p) = 1 quad forall p in {m, dots, n - 1} \ + & z_(t,p,i), e_(t,p), d_(t,j), s_(t,j), nu_t in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any valid length-$K$ edit script yields a feasible sequence of operations and states ending at the target. ($arrow.l.double$) Any feasible ILP solution traces a legal sequence of deletes, adjacent swaps, and no-ops whose final string is the target. + + _Solution extraction._ For each step $t$, compute the current length + $ell_(t - 1) = sum_(p = 0)^(n - 1) (1 - e_(t - 1,p))$. + If $nu_t = 1$, output the source code $2 n$. If $d_(t,j) = 1$, output $j$. If $s_(t,j) = 1$, output $ell_(t - 1) + j$. This is exactly the encoding used by `evaluate()`: deletions use raw positions, swaps are offset by the current length, and no-op is the distinguished value $2 n$. +] + +#reduction-rule("PaintShop", "ILP")[ + One binary variable per car determines its first color, the second occurrence receives the opposite color automatically, and switch indicators count color changes along the sequence. 
+][ + _Construction._ Variables: binary $x_i$ for each car $i$, binary color variables $k_p$ for sequence positions, and binary switch indicators $c_p$ for positions $p > 0$. The ILP is: + $ + min quad & sum_p c_p \ + "subject to" quad & k_p = x_i quad "if p is the first occurrence of car i" \ + & k_p = 1 - x_i quad "otherwise" \ + & c_p >= k_p - k_(p-1) quad forall p > 0 \ + & c_p >= k_(p-1) - k_p quad forall p > 0 \ + & x_i, k_p, c_p in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any first-occurrence coloring determines the whole paint sequence and induces exactly the same number of switches in the ILP. ($arrow.l.double$) Any ILP assignment is already a valid source witness, and the switch variables are forced to count adjacent color changes. + + _Solution extraction._ Output the first-occurrence color bits $(x_i)$. +] + +#reduction-rule("IsomorphicSpanningTree", "ILP")[ + A bijection from the tree vertices to the graph vertices is enough: every tree edge must map to a graph edge, which then defines the desired spanning tree. +][ + _Construction._ Variables: binary $x_(u,v)$ with $x_(u,v) = 1$ iff tree vertex $u$ maps to graph vertex $v$. The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_v x_(u,v) = 1 quad forall u \ + & sum_u x_(u,v) = 1 quad forall v \ + & x_(u,v) + x_(w,z) <= 1 quad forall {u, w} in E_"tree", {v, z} in.not E_"graph" \ + & x_(u,z) + x_(w,v) <= 1 quad forall {u, w} in E_"tree", {v, z} in.not E_"graph" \ + & x_(u,v) in {0, 1} + $. + + _Correctness._ ($arrow.r.double$) Any isomorphism from the given tree to a spanning tree of the graph satisfies the bijection and non-edge constraints. ($arrow.l.double$) Any feasible ILP solution is a bijection that preserves every tree edge, so the image edges form a spanning tree of the graph isomorphic to the source tree. + + _Solution extraction._ For each tree vertex $u$, output the unique graph vertex $v$ with $x_(u,v) = 1$. 
+] + +#reduction-rule("RootedTreeStorageAssignment", "ILP")[ + Choose one parent for each non-root element, enforce acyclicity with depth variables, and linearize the path-extension cost of every subset by selecting its top and bottom vertices in the rooted tree. +][ + _Construction._ Let $X = {0, dots, n - 1}$ and let the subset family be $cal(C) = {S_0, dots, S_(m-1)}$. For every subset of size 0 or 1 the model charges extension cost 0 automatically, so only the nontrivial subsets matter. Enumerate them as + $I = {k_0 < dots < k_(r-1)} = {k : |S_k| >= 2}$. + Use `ILP`. The variable blocks are: + parent indicators $p_(v,u) in {0, 1}$ for all $v, u in X$; + depths $d_v in {0, dots, n - 1}$; + ancestor indicators $a_(u,v) in {0, 1}$, where $a_(u,v) = 1$ means $u$ is an ancestor of $v$ (allowing $u = v$); + auxiliary transitive-closure variables $h_(u,v,w) in {0, 1}$; + and, for each nontrivial subset gadget $s in {0, dots, r - 1}$ corresponding to original subset $S_(k_s)$, top selectors $t_(s,u)$, bottom selectors $b_(s,v)$, pair selectors $m_(s,u,v)$, endpoint depths $T_s, B_s$, and extension cost $c_s$. + + The indices are + $"idx"_p(v,u) = v n + u$, + $"idx"_d(v) = n^2 + v$, + $"idx"_a(u,v) = n^2 + n + u n + v$, + $"idx"_h(u,v,w) = 2 n^2 + n + (u n + v) n + w$, + $"idx"_t(s,u) = n^3 + 2 n^2 + n + s n + u$, + $"idx"_b(s,v) = n^3 + 2 n^2 + n + r n + s n + v$, + $"idx"_m(s,u,v) = n^3 + 2 n^2 + n + 2 r n + s n^2 + u n + v$, + $"idx"_T(s) = n^3 + 2 n^2 + n + 2 r n + r n^2 + s$, + $"idx"_B(s) = n^3 + 2 n^2 + n + 2 r n + r n^2 + r + s$, + and $ "idx"_c(s) = n^3 + 2 n^2 + n + 2 r n + r n^2 + 2 r + s$. + The total number of variables is + $n^3 + 2 n^2 + n + r (n^2 + 2 n + 3)$. 
+ + The rooted-tree constraints are: + $sum_(u = 0)^(n - 1) p_(v,u) = 1$ for every vertex $v$; + $sum_v p_(v,v) = 1$, so exactly one vertex chooses itself as parent and becomes the root; + $d_v <= (n - 1) (1 - p_(v,v))$ for every $v$, hence the unique root has depth 0; + and for every ordered pair $u != v$, + $d_v - d_u >= 1 - n (1 - p_(v,u))$ and $d_v - d_u <= 1 + n (1 - p_(v,u))$. + The exact big-$M$ here is $M = n$: the expression $d_v - d_u - 1$ ranges from $-n$ to $n - 2$ when both depths lie in ${0, dots, n - 1}$. + + The ancestor relation is defined explicitly by + $a_(v,v) = 1$ for every $v$, + $h_(u,v,v) = 0$ for all $u, v$, + and, for every $u != v$, + $a_(u,v) = sum_(w = 0)^(n - 1) h_(u,v,w)$. + The helper variables linearize the recursion "u is an ancestor of v iff u is an ancestor of the unique parent of v": + $h_(u,v,w) <= p_(v,w)$, + $h_(u,v,w) <= a_(u,w)$, + and + $h_(u,v,w) >= p_(v,w) + a_(u,w) - 1$ + for all $u, v, w$ with $w != v$. + + For each nontrivial subset gadget $s$ corresponding to $S_(k_s)$, choose path endpoints only from the subset itself: + $sum_(u in S_(k_s)) t_(s,u) = 1$, $t_(s,u) = 0$ for $u in.not S_(k_s)$, + $sum_(v in S_(k_s)) b_(s,v) = 1$, $b_(s,v) = 0$ for $v in.not S_(k_s)$. + Linearize the chosen ordered endpoint pair by + $m_(s,u,v) <= t_(s,u)$, + $m_(s,u,v) <= b_(s,v)$, + $m_(s,u,v) >= t_(s,u) + b_(s,v) - 1$. + Because exactly one top and one bottom are chosen, exactly one pair selector is 1. + + The path condition for subset $S_(k_s)$ is then fully explicit: + $m_(s,u,v) <= a_(u,v)$ for every $u, v$, so the chosen top is an ancestor of the chosen bottom; + and for every element $w in S_(k_s)$, + $m_(s,u,v) <= a_(u,w)$ and $m_(s,u,v) <= a_(w,v)$ for all $u, v$. + Thus every subset element lies on the ancestor chain from the chosen top to the chosen bottom. 
+ + Bind the endpoint depths to the chosen selectors by exact big-$M$ constraints with $M = n - 1$: + $T_s - d_u <= (n - 1) (1 - t_(s,u))$ and $d_u - T_s <= (n - 1) (1 - t_(s,u))$ for every $u$; + $B_s - d_v <= (n - 1) (1 - b_(s,v))$ and $d_v - B_s <= (n - 1) (1 - b_(s,v))$ for every $v$. + Finally set the extension cost of the subset to the exact path surplus + $c_s = B_s - T_s + 1 - |S_(k_s)|$, + require $c_s >= 0$, + and bound the total cost by + $sum_(s = 0)^(r - 1) c_s <= K$. + This matches the model's `subset_extension_cost()`: the top and bottom are the shallowest and deepest members of the subset on the chosen chain, and the path contributes exactly the interior vertices not already present in the subset. + + The ILP is: + $ + "find" quad & bold(x) \ + "subject to" quad & sum_(u = 0)^(n - 1) p_(v,u) = 1 quad forall v in X \ + & sum_v p_(v,v) = 1, d_v <= (n - 1) (1 - p_(v,v)) quad forall v in X \ + & d_v - d_u >= 1 - n (1 - p_(v,u)), d_v - d_u <= 1 + n (1 - p_(v,u)) quad forall u != v \ + & a_(v,v) = 1, h_(u,v,v) = 0, a_(u,v) = sum_(w = 0)^(n - 1) h_(u,v,w) quad forall u, v in X \ + & h_(u,v,w) <= p_(v,w), h_(u,v,w) <= a_(u,w), h_(u,v,w) >= p_(v,w) + a_(u,w) - 1 quad forall u, v, w in X " with " w != v \ + & sum_(u in S_(k_s)) t_(s,u) = 1, t_(s,u) = 0 quad forall u in.not S_(k_s), s \ + & sum_(v in S_(k_s)) b_(s,v) = 1, b_(s,v) = 0 quad forall v in.not S_(k_s), s \ + & m_(s,u,v) <= t_(s,u), m_(s,u,v) <= b_(s,v), m_(s,u,v) >= t_(s,u) + b_(s,v) - 1 quad forall s, u, v \ + & m_(s,u,v) <= a_(u,v), m_(s,u,v) <= a_(u,w), m_(s,u,v) <= a_(w,v) quad forall s, u, v, w in S_(k_s) \ + & T_s - d_u <= (n - 1) (1 - t_(s,u)), d_u - T_s <= (n - 1) (1 - t_(s,u)) quad forall s, u \ + & B_s - d_v <= (n - 1) (1 - b_(s,v)), d_v - B_s <= (n - 1) (1 - b_(s,v)) quad forall s, v \ + & c_s = B_s - T_s + 1 - |S_(k_s)|, c_s >= 0 quad forall s; sum_s c_s <= K \ + & p_(v,u), a_(u,v), h_(u,v,w), t_(s,u), b_(s,v), m_(s,u,v) in {0, 1} \ + & d_v, T_s, B_s, c_s in ZZ_(>=0) + $. 
+ + _Correctness._ ($arrow.r.double$) Any rooted tree satisfying all subset-path conditions induces parent, depth, and path-endpoint variables with the same total extension cost. ($arrow.l.double$) Any feasible ILP solution defines a rooted tree in which every subset lies on one ancestor chain, and the encoded path lengths keep the total extension cost within the bound. + + _Solution extraction._ For each vertex $v$, output its unique parent $u$ with $p_(v,u) = 1$. +] + == Unit Disk Mapping #reduction-rule("MaximumIndependentSet", "KingsSubgraph")[ diff --git a/docs/superpowers/plans/2026-03-24-tier3-ilp-reductions.md b/docs/superpowers/plans/2026-03-24-tier3-ilp-reductions.md new file mode 100644 index 00000000..d0338d75 --- /dev/null +++ b/docs/superpowers/plans/2026-03-24-tier3-ilp-reductions.md @@ -0,0 +1,547 @@ +# Tier 3 ILP Reductions Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Connect 39 orphan Tier 3 problems to ILP via direct reductions in one PR, with shared linearization helpers. + +**Architecture:** Each reduction follows the established pattern: `#[reduction(overhead)]` macro on `impl ReduceTo>`, a `ReductionResult` struct with `extract_solution`, and a closed-loop test. A new `ilp_helpers.rs` module provides shared linearization primitives (McCormick, MTZ, flow conservation, big-M, abs-diff, minimax, one-hot decode). Paper entries are already written in `docs/paper/reductions.typ`. + +**Tech Stack:** Rust, `#[reduction]` proc macro, `ILP` / `ILP` target types, `LinearConstraint` API. + +**Spec:** `docs/superpowers/specs/2026-03-24-tier3-ilp-reductions-design.md` +**Paper entries:** `docs/paper/reductions.typ` (search for each `#reduction-rule("", "ILP")`) + +**Status:** Paper entries are committed and reviewed. 
All 39 entries have standardized multiline ILP equation blocks + detailed prose constructions. 9 complex entries have been expanded with full variable indexing, big-M values, and flow schemes. All symbols verified against problem definitions.
+
+---
+
+## CRITICAL: Paper Is Ground Truth
+
+**The Typst paper (`docs/paper/reductions.typ`) is the authoritative source for every ILP formulation.** Each reduction-rule entry contains a standardized multiline equation block showing the complete ILP (objective/find + constraints + domain), plus prose explaining variable meanings and solution extraction. These entries have been reviewed and verified against the model files.
+
+**When implementing each reduction in Rust, you MUST:**
+1. **Read the paper entry first** — find the `#reduction-rule("<ProblemName>", "ILP")` block
+2. **Implement exactly the formulation described in the paper** — same variables, same constraints, same extraction logic. Do NOT invent a different formulation.
+3. **Cross-check** — if you find the paper's formulation seems wrong or incomplete, STOP and flag it for human review. Do not silently deviate.
+4. **The spec file is secondary** — it provides metadata (ILP type, helpers, dims) but the paper has the precise mathematical construction. When they conflict, the paper wins.
+
+---
+
+## File Structure
+
+**New files (40 total):**
+- `src/rules/ilp_helpers.rs` — shared helper module
+- `src/unit_tests/rules/ilp_helpers.rs` — helper tests
+- 39 rule files: `src/rules/<problem>_ilp.rs`
+- 39 test files: `src/unit_tests/rules/<problem>_ilp.rs`
+
+**Modified files:**
+- `src/rules/mod.rs` — 39 module declarations + 39 canonical_rule_example_specs calls
+
+---
+
+## Reference Files
+
+Before implementing ANY task, read these files to understand the patterns:
+
+- **Rule template:** `src/rules/maximalis_ilp.rs` (complete ILP reduction example)
+- **Test template:** `src/unit_tests/rules/knapsack_ilp.rs` (closed-loop test pattern)
+- **Test helpers:** `src/rules/test_helpers.rs` (assertion functions)
+- **ILP model:** `src/models/algebraic/ilp.rs` (LinearConstraint, ILP struct, ObjectiveSense)
+- **Paper formulations:** `docs/paper/reductions.typ` lines 8206-8607 (mathematical reference for each reduction)
+
+---
+
+## Task 0: Helper Module
+
+**Files:**
+- Create: `src/rules/ilp_helpers.rs`
+- Create: `src/unit_tests/rules/ilp_helpers.rs`
+- Modify: `src/rules/mod.rs` (add module declaration)
+
+- [ ] **Step 0.1: Add module declaration to mod.rs**
+
+Add inside the `#[cfg(feature = "ilp-solver")]` block in `src/rules/mod.rs`:
+```rust
+#[cfg(feature = "ilp-solver")]
+pub(crate) mod ilp_helpers;
+```
+
+- [ ] **Step 0.2: Write helper tests (TDD)**
+
+Create `src/unit_tests/rules/ilp_helpers.rs` with tests for all 7 helpers:
+```rust
+// Test mccormick_product: verify 3 constraints y<=x_a, y<=x_b, y>=x_a+x_b-1
+// Test mtz_ordering: verify arc constraints + bound constraints
+// Test flow_conservation: verify demand equations at each node
+// Test big_m_activation: verify f <= M*y
+// Test abs_diff_le: verify two constraints for |a-b| <= z
+// Test minimax_constraints: verify z >= expr_i for each expr
+// Test one_hot_decode: verify correct index extraction
+```
+
+- [ ] **Step 0.3: Implement ilp_helpers.rs**
+
+Create `src/rules/ilp_helpers.rs` with 7 public
functions matching the spec's Phase 0 signatures. Reference `src/models/algebraic/ilp.rs` for `LinearConstraint` API.
+
+- [ ] **Step 0.4: Run tests, verify pass**
+
+```bash
+cargo test --features ilp-solver ilp_helpers -- --nocapture
+```
+
+- [ ] **Step 0.5: Commit**
+
+```bash
+git add src/rules/ilp_helpers.rs src/unit_tests/rules/ilp_helpers.rs src/rules/mod.rs
+git commit -m "feat: add shared ILP linearization helpers (McCormick, MTZ, flow, big-M, abs-diff, minimax, one-hot)"
+```
+
+---
+
+## Task 1: Flow-based reductions (9 rules)
+
+**For each rule below, follow this sub-pattern:**
+1. **Read the paper entry FIRST** (`docs/paper/reductions.typ`) — this is the ground truth for the ILP formulation (variables, constraints, objective, extraction). Implement exactly what it says.
+2. Read the model file (`src/models/<category>/<model>.rs`) — note `dims()`, `Value`, getters for overhead expressions
+3. Write the test file (`src/unit_tests/rules/<problem>_ilp.rs`) — closed-loop test with small instance
+4. Write the rule file (`src/rules/<problem>_ilp.rs`) — implement the paper's formulation in Rust, with extract_solution + canonical example
+5. Add module + specs registration to `src/rules/mod.rs`
+6. Run `cargo test --features ilp-solver <problem>_ilp`
+7. Run `cargo clippy --features ilp-solver`
+
+### Task 1.1: IntegralFlowHomologousArcs → ILP
+
+**Files:**
+- Create: `src/rules/integralflowhomologousarcs_ilp.rs`
+- Create: `src/unit_tests/rules/integralflowhomologousarcs_ilp.rs`
+- Modify: `src/rules/mod.rs`
+- Model: `src/models/graph/integral_flow_homologous_arcs.rs`
+- Paper: search for `#reduction-rule("IntegralFlowHomologousArcs", "ILP")` ~line 8209
+
+**ILP type:** `ILP<i64>`. **Value:** `Or`. **Extract:** Direct (f_a values).
+**Formulation:** Integer f_a per arc. Capacity, conservation, homologous equality, requirement.
+**Helpers:** `flow_conservation`
+
+- [ ] **Step 1.1.1:** Write test — construct small network (4-5 nodes, 6-8 arcs, 1-2 homologous pairs), test closed-loop with `assert_satisfaction_round_trip_from_satisfaction_target`
+- [ ] **Step 1.1.2:** Write rule — `impl ReduceTo<ILP<i32>>`, overhead = `{ num_vars = "num_arcs", num_constraints = "num_arcs + num_vertices + num_homologous_pairs" }`
+- [ ] **Step 1.1.3:** Register in mod.rs, run tests + clippy
+
+### Task 1.2: IntegralFlowWithMultipliers → ILP
+
+**Files:** `src/rules/integralflowwithmultipliers_ilp.rs` + test
+- Model: `src/models/graph/integral_flow_with_multipliers.rs`
+- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8219
+
+**ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct.
+**Formulation:** Integer f_a per arc. Capacity, multiplier-scaled conservation, requirement.
+**Helpers:** `flow_conservation` (adapted for multipliers)
+
+- [ ] **Step 1.2.1-1.2.3:** Test → Rule → Register (same sub-pattern)
+
+### Task 1.3: PathConstrainedNetworkFlow → ILP
+
+**Files:** `src/rules/pathconstrainednetworkflow_ilp.rs` + test
+- Model: `src/models/graph/path_constrained_network_flow.rs`
+- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8229
+
+**ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct (f_p per path).
+**Formulation:** Integer f_p per allowed path. Arc capacity aggregation, requirement.
+**Helpers:** None
+
+- [ ] **Step 1.3.1-1.3.3:** Test → Rule → Register
+
+### Task 1.4: DisjointConnectingPaths → ILP
+
+**Files:** `src/rules/disjointconnectingpaths_ilp.rs` + test
+- Model: `src/models/graph/disjoint_connecting_paths.rs`
+- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8239
+
+**ILP type:** `bool`. **Value:** `Or`. **Extract:** OR over commodities → binary edge selection.
+**Formulation:** Binary f^k_{uv} per commodity per arc. Conservation, vertex-disjointness (Σ_k ≤ 1), order vars for subtour elimination. 
+**Helpers:** `flow_conservation` + +- [ ] **Step 1.4.1-1.4.3:** Test → Rule → Register + +### Task 1.5: LengthBoundedDisjointPaths → ILP + +**Files:** `src/rules/lengthboundeddisjointpaths_ilp.rs` + test +- Model: `src/models/graph/length_bounded_disjoint_paths.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8249 + +**ILP type:** `ILP`. **Value:** `Or`. **Extract:** Flow vars → vertex indicators per path slot. +**Formulation:** Binary flow + integer hop counters per commodity. Conservation, disjointness, hop ≤ L. +**Helpers:** `flow_conservation` + +- [ ] **Step 1.5.1-1.5.3:** Test → Rule → Register + +### Task 1.6: MixedChinesePostman → ILP + +**Files:** `src/rules/mixedchinesepostman_ilp.rs` + test +- Model: `src/models/graph/mixed_chinese_postman.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8259 + +**ILP type:** `ILP`. **Value:** `Or`. **Extract:** Orientation bits d_e. +**Formulation:** Binary orientation + integer augmentation + connectivity flow. Euler balance, length bound. +**Helpers:** `flow_conservation`, `big_m_activation` + +- [ ] **Step 1.6.1-1.6.3:** Test → Rule → Register + +### Task 1.7: RuralPostman → ILP + +**Files:** `src/rules/ruralpostman_ilp.rs` + test +- Model: `src/models/graph/rural_postman.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8269 + +**ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct (t_e ternary multiplicity, `dims() = vec![3; num_edges]`). +**Formulation:** Integer t_e ∈ {0,1,2} + binary y_e + flow. Required coverage, even degree, connectivity, length bound. +**Helpers:** `flow_conservation`, `big_m_activation` + +- [ ] **Step 1.7.1-1.7.3:** Test → Rule → Register + +### Task 1.8: StackerCrane → ILP + +**Files:** `src/rules/stackercrane_ilp.rs` + test +- Model: `src/models/misc/stacker_crane.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8279 + +**ILP type:** `ILP`. **Value:** `Or`. 
**Extract:** One-hot decode → arc permutation (`dims() = vec![m; m]`). +**Formulation:** Binary x_{a,p} position-assignment + McCormick z for consecutive pairs. Precomputed shortest-path connector costs. +**Helpers:** `mccormick_product`, `one_hot_decode` + +- [ ] **Step 1.8.1-1.8.3:** Test → Rule → Register + +### Task 1.9: SteinerTreeInGraphs → ILP + +**Files:** `src/rules/steinertreeingraphs_ilp.rs` + test +- Model: `src/models/graph/steiner_tree_in_graphs.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8289 + +**ILP type:** `ILP`. **Value:** `Min` (optimization). **Extract:** Direct (edge selection). +**Formulation:** Binary y_e + multi-commodity flow. Same pattern as existing SteinerTree→ILP. +**Helpers:** `flow_conservation`, `big_m_activation` + +- [ ] **Step 1.9.1-1.9.3:** Test (use `assert_optimization_round_trip_from_optimization_target`) → Rule → Register + +- [ ] **Step 1.10: Run full flow-based test suite + commit** + +```bash +cargo test --features ilp-solver -- integralflow steiner disjoint lengthbounded mixed rural stacker +cargo clippy --features ilp-solver +git add src/rules/*_ilp.rs src/unit_tests/rules/*_ilp.rs src/rules/mod.rs +git commit -m "feat: add 9 flow-based Tier 3 ILP reductions" +``` + +--- + +## Task 2: Scheduling reductions (7 rules) + +**Common note:** FlowShopScheduling, MinimumTardinessSequencing, SequencingToMinimizeWeightedTardiness use Lehmer-code configs. Extract via: sort jobs by ILP completion times → derive permutation → convert to Lehmer code. Use a shared `permutation_to_lehmer()` helper (can be added to `ilp_helpers.rs`). + +### Task 2.1: FlowShopScheduling → ILP +- Model: `src/models/misc/flow_shop_scheduling.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8301 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Completion times → sort → Lehmer code. 
+- [ ] **Step 2.1.1-2.1.3:** Test → Rule → Register + +### Task 2.2: MinimumTardinessSequencing → ILP +- Model: `src/models/misc/minimum_tardiness_sequencing.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8311 +- **ILP type:** `ILP`. **Value:** `Min` (optimization). **Extract:** Position decode → Lehmer code. +- [ ] **Step 2.2.1-2.2.3:** Test (optimization round-trip) → Rule → Register + +### Task 2.3: ResourceConstrainedScheduling → ILP +- Model: `src/models/misc/resource_constrained_scheduling.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8321 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Time-slot decode. +- [ ] **Step 2.3.1-2.3.3:** Test → Rule → Register + +### Task 2.4: SequencingToMinimizeMaximumCumulativeCost → ILP +- Model: `src/models/misc/sequencing_to_minimize_maximum_cumulative_cost.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8331 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Position decode → Lehmer code. +- [ ] **Step 2.4.1-2.4.3:** Test → Rule → Register + +### Task 2.5: SequencingToMinimizeWeightedTardiness → ILP +- Model: `src/models/misc/sequencing_to_minimize_weighted_tardiness.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8341 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Completion times → sort → Lehmer code. +- [ ] **Step 2.5.1-2.5.3:** Test → Rule → Register + +### Task 2.6: SequencingWithReleaseTimesAndDeadlines → ILP +- Model: `src/models/misc/sequencing_with_release_times_and_deadlines.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8351 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Start-time decode → sort → Lehmer code. +- [ ] **Step 2.6.1-2.6.3:** Test → Rule → Register + +### Task 2.7: TimetableDesign → ILP +- Model: `src/models/misc/timetable_design.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8361 +- **ILP type:** `ILP`. **Value:** `Or`. 
**Extract:** Direct (binary tensor). +- [ ] **Step 2.7.1-2.7.3:** Test → Rule → Register + +- [ ] **Step 2.8: Run full scheduling test suite + commit** + +```bash +cargo test --features ilp-solver -- flowshop tardiness resourceconstrained sequencing timetable +cargo clippy --features ilp-solver +git commit -m "feat: add 7 scheduling Tier 3 ILP reductions" +``` + +--- + +## Task 3: Position/Assignment + McCormick reductions (6 rules) + +### Task 3.1: HamiltonianPath → ILP +- Model: `src/models/graph/hamiltonian_path.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8373 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** One-hot decode → vertex permutation (`dims() = vec![n; n]`). +- **Helpers:** `mccormick_product`, `one_hot_decode` +- [ ] **Step 3.1.1-3.1.3:** Test → Rule → Register + +### Task 3.2: BottleneckTravelingSalesman → ILP +- Model: `src/models/graph/bottleneck_traveling_salesman.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8383 +- **ILP type:** `ILP`. **Value:** `Min` (optimization). **Extract:** Position tour → edge selection (`dims() = vec![2; num_edges]`). +- **Helpers:** `mccormick_product`, `minimax_constraints`, `one_hot_decode` +- [ ] **Step 3.2.1-3.2.3:** Test (optimization round-trip) → Rule → Register + +### Task 3.3: LongestCircuit → ILP +- Model: `src/models/graph/longest_circuit.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8393 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct (binary edge selection). +- **Formulation:** Degree-2 vertex selection + flow connectivity (NOT position-assignment). No McCormick. +- **Helpers:** `flow_conservation` +- [ ] **Step 3.3.1-3.3.3:** Test → Rule → Register + +### Task 3.4: QuadraticAssignment → ILP +- Model: `src/models/algebraic/quadratic_assignment.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8403 +- **ILP type:** `ILP`. **Value:** `Min` (optimization). 
**Extract:** One-hot decode → injection (`dims() = vec![num_locations; num_facilities]`). +- **Helpers:** `mccormick_product`, `one_hot_decode` +- [ ] **Step 3.4.1-3.4.3:** Test (optimization round-trip) → Rule → Register + +### Task 3.5: OptimalLinearArrangement → ILP +- Model: `src/models/graph/optimal_linear_arrangement.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8413 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** One-hot decode → vertex positions (`dims() = vec![n; n]`). +- **Helpers:** `abs_diff_le`, `one_hot_decode` +- [ ] **Step 3.5.1-3.5.3:** Test → Rule → Register + +### Task 3.6: SubgraphIsomorphism → ILP +- Model: `src/models/graph/subgraph_isomorphism.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8423 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** One-hot decode → injection (`dims() = vec![n_host; n_pattern]`). +- **Formulation:** No McCormick — direct non-edge constraints `x_{v,u} + x_{w,u'} ≤ 1`. +- [ ] **Step 3.6.1-3.6.3:** Test → Rule → Register + +- [ ] **Step 3.7: Run full position/assignment test suite + commit** + +```bash +cargo test --features ilp-solver -- hamiltonianpath bottleneck longestcircuit quadratic optimal subgraph +cargo clippy --features ilp-solver +git commit -m "feat: add 6 position/assignment Tier 3 ILP reductions" +``` + +--- + +## Task 4: Graph structure reductions (7 rules) + +### Task 4.1: AcyclicPartition → ILP +- Model: `src/models/graph/acyclic_partition.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8435 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** One-hot decode x_{v,c} → partition label (`dims() = vec![n; n]`). +- **Formulation:** Binary assignment + McCormick same-class indicators + class ordering for quotient DAG. 
+- **Helpers:** `mccormick_product`, `one_hot_decode` +- [ ] **Step 4.1.1-4.1.3:** Test → Rule → Register + +### Task 4.2: BalancedCompleteBipartiteSubgraph → ILP +- Model: `src/models/graph/balanced_complete_bipartite_subgraph.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8445 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct (binary selection). +- **Formulation:** Binary x_l, y_r. Balance + non-edge constraints. No McCormick. +- [ ] **Step 4.2.1-4.2.3:** Test → Rule → Register + +### Task 4.3: BicliqueCover → ILP +- Model: `src/models/graph/biclique_cover.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8455 +- **ILP type:** `ILP`. **Value:** `Min` (optimization). **Extract:** Direct (membership bits). +- **Helpers:** `mccormick_product` +- [ ] **Step 4.3.1-4.3.3:** Test (optimization round-trip) → Rule → Register + +### Task 4.4: BiconnectivityAugmentation → ILP +- Model: `src/models/graph/biconnectivity_augmentation.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8465 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct (binary edge selection). +- **Formulation:** Binary y_e + flow for 2-vertex-connectivity (per-vertex-deletion connectivity check). +- **Helpers:** `flow_conservation`, `big_m_activation` +- [ ] **Step 4.4.1-4.4.3:** Test → Rule → Register + +### Task 4.5: BoundedComponentSpanningForest → ILP +- Model: `src/models/graph/bounded_component_spanning_forest.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8475 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Component label decode. +- **Formulation:** Binary x_{v,c} assignment + weight bounds + flow connectivity within components. 
+- **Helpers:** `flow_conservation`, `one_hot_decode` +- [ ] **Step 4.5.1-4.5.3:** Test → Rule → Register + +### Task 4.6: MinimumCutIntoBoundedSets → ILP +- Model: `src/models/graph/minimum_cut_into_bounded_sets.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8485 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct (partition bit-vector). +- **Formulation:** Binary x_v + binary y_e. Balance bounds + cut linking. +- [ ] **Step 4.6.1-4.6.3:** Test → Rule → Register + +### Task 4.7: StrongConnectivityAugmentation → ILP +- Model: `src/models/graph/strong_connectivity_augmentation.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8495 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct (binary arc selection). +- **Formulation:** Binary y_a + bidirectional multi-commodity flow from root. +- **Helpers:** `flow_conservation`, `big_m_activation` +- [ ] **Step 4.7.1-4.7.3:** Test → Rule → Register + +- [ ] **Step 4.8: Run full graph structure test suite + commit** + +```bash +cargo test --features ilp-solver -- acyclicpartition balanced biclique biconnectivity bounded minimumcut strongconnectivity +cargo clippy --features ilp-solver +git commit -m "feat: add 7 graph structure Tier 3 ILP reductions" +``` + +--- + +## Task 5: Matrix/encoding reductions (5 rules) + +### Task 5.1: BMF → ILP +- Model: `src/models/algebraic/bmf.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8507 +- **ILP type:** `ILP`. **Value:** `Min` (optimization). **Extract:** Direct (factor matrix bits). +- **Formulation:** McCormick for Boolean products, OR-of-ANDs reconstruction, Hamming distance. +- **Helpers:** `mccormick_product` +- [ ] **Step 5.1.1-5.1.3:** Test (optimization round-trip) → Rule → Register + +### Task 5.2: ConsecutiveBlockMinimization → ILP +- Model: `src/models/algebraic/consecutive_block_minimization.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8517 +- **ILP type:** `ILP`. 
**Value:** `Or`. **Extract:** One-hot decode → column permutation (`dims() = vec![num_cols; num_cols]`). +- **Helpers:** `one_hot_decode` +- [ ] **Step 5.2.1-5.2.3:** Test → Rule → Register + +### Task 5.3: ConsecutiveOnesMatrixAugmentation → ILP +- Model: `src/models/algebraic/consecutive_ones_matrix_augmentation.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8527 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** One-hot decode → column permutation. +- [ ] **Step 5.3.1-5.3.3:** Test → Rule → Register + +### Task 5.4: ConsecutiveOnesSubmatrix → ILP +- Model: `src/models/algebraic/consecutive_ones_submatrix.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8537 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Direct (s_j selection bits, `dims() = vec![2; num_cols]`). +- **Formulation:** Binary s_j + auxiliary permutation x_{c,p} + C1P interval constraints. +- [ ] **Step 5.4.1-5.4.3:** Test → Rule → Register + +### Task 5.5: SparseMatrixCompression → ILP +- Model: `src/models/algebraic/sparse_matrix_compression.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8547 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** One-hot decode → shift assignment. +- [ ] **Step 5.5.1-5.5.3:** Test → Rule → Register + +- [ ] **Step 5.6: Run full matrix/encoding test suite + commit** + +```bash +cargo test --features ilp-solver -- bmf consecutiveblock consecutiveones sparse +cargo clippy --features ilp-solver +git commit -m "feat: add 5 matrix/encoding Tier 3 ILP reductions" +``` + +--- + +## Task 6: Sequence/misc reductions (5 rules) + +### Task 6.1: ShortestCommonSupersequence → ILP +- Model: `src/models/misc/shortest_common_supersequence.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8559 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Symbol sequence extraction. 
+- [ ] **Step 6.1.1-6.1.3:** Test → Rule → Register + +### Task 6.2: StringToStringCorrection → ILP +- Model: `src/models/misc/string_to_string_correction.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8569 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** Operation indicator → scalar operation code. +- [ ] **Step 6.2.1-6.2.3:** Test → Rule → Register + +### Task 6.3: PaintShop → ILP +- Model: `src/models/misc/paintshop.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8579 +- **ILP type:** `ILP`. **Value:** `Min` (optimization). **Extract:** Direct (x_i first-occurrence color bits, `dims() = vec![2; num_cars]`). +- [ ] **Step 6.3.1-6.3.3:** Test (optimization round-trip) → Rule → Register + +### Task 6.4: IsomorphicSpanningTree → ILP +- Model: `src/models/graph/isomorphic_spanning_tree.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8589 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** One-hot decode → bijection (`dims() = vec![n; n]`). +- **Formulation:** Pure bijection x_{u,v} with non-edge constraints (no flow needed). +- [ ] **Step 6.4.1-6.4.3:** Test → Rule → Register + +### Task 6.5: RootedTreeStorageAssignment → ILP +- Model: `src/models/set/rooted_tree_storage_assignment.rs` +- Paper: search for `#reduction-rule("ProblemName", "ILP")` ~line 8599 +- **ILP type:** `ILP`. **Value:** `Or`. **Extract:** One-hot parent decode → parent array (`dims() = vec![n; n]`). +- **Formulation:** Binary p_{v,u} parent indicators + integer depths + subset path extension costs. 
+- [ ] **Step 6.5.1-6.5.3:** Test → Rule → Register + +- [ ] **Step 6.6: Run full sequence/misc test suite + commit** + +```bash +cargo test --features ilp-solver -- shortestcommon stringtostring paintshop isomorphicspanning rootedtreestorage +cargo clippy --features ilp-solver +git commit -m "feat: add 5 sequence/misc Tier 3 ILP reductions" +``` + +--- + +## Task 7: Final verification and PR + +- [ ] **Step 7.1: Full test suite** + +```bash +make check +cargo test --features ilp-solver +``` + +- [ ] **Step 7.2: Paper completeness check** + +```bash +make paper +``` +Paper entries are already committed. Verify no new completeness warnings after Rust reductions are registered (the `#[reduction]` macro registrations should match the paper's `reduction-rule` entries). + +- [ ] **Step 7.3: Coverage check** + +```bash +make coverage +``` +Verify >95% coverage on new code. + +- [ ] **Step 7.4: Final commit and PR** + +```bash +git add -A +git commit -m "feat: add 39 Tier 3 ILP reductions + shared helpers + +Connects all remaining orphan NP-hard problems to ILP, enabling +DefaultSolver dispatch. Includes shared ilp_helpers module with +McCormick, MTZ, flow conservation, big-M, abs-diff, and minimax +linearization primitives. + +Closes #728, closes #733. +Ref #762." +``` + +Create PR targeting `main`. 
+ +- [ ] **Step 7.5: Post-merge cleanup** + +- Update #762 body: move 39 problems from Tier 3 to Tier 1 +- Close #728 (TimetableDesign→ILP) and #733 (IntegralFlowHomologousArcs→ILP) +- File separate issues for deferred: PartialFeedbackEdgeSet→ILP, RootedTreeArrangement→ILP diff --git a/docs/superpowers/specs/2026-03-24-tier3-ilp-reductions-design.md b/docs/superpowers/specs/2026-03-24-tier3-ilp-reductions-design.md new file mode 100644 index 00000000..cbdfc6bc --- /dev/null +++ b/docs/superpowers/specs/2026-03-24-tier3-ilp-reductions-design.md @@ -0,0 +1,220 @@ +# Tier 3 ILP Reductions — Design Spec + +**Date:** 2026-03-24 +**Scope:** One PR adding 39 `→ ILP` reductions for Tier 3 orphan problems, plus a shared helper module. +**Deferred:** PartialFeedbackEdgeSet (no polynomial-size correct ILP for L < n), RootedTreeArrangement (compound `vec![n; 2*n]` config too complex for batch). +**Tracking issue:** #762 (DefaultSolver classification) + +--- + +## Goal + +Connect 39 of 41 isolated Tier 3 problem types to the reduction graph via direct ILP reductions. Two problems (PartialFeedbackEdgeSet, RootedTreeArrangement) are deferred to separate issues due to formulation complexity. + +## Deliverables + +1. `src/rules/ilp_helpers.rs` — shared linearization helpers (with unit tests) +2. 39 new reduction files `src/rules/_ilp.rs` (feature-gated under `#[cfg(feature = "ilp-solver")]`) +3. 39 entries in `src/rules/mod.rs`: module declarations + `canonical_rule_example_specs()` aggregation +4. 39 closed-loop tests in corresponding unit test files +5. 39 `reduction-rule` entries in `docs/paper/reductions.typ` +6. 
Updated #762 body (move Tier 3 → Tier 1) + +--- + +## Problem Classification + +### Value types (optimization vs satisfaction) + +**Optimization** (`Min`/`Max` — use `assert_optimization_round_trip_from_optimization_target`): +- BottleneckTravelingSalesman (`Min`), MinimumTardinessSequencing (`Min`), + QuadraticAssignment (`Min`), BMF (`Min`), PaintShop (`Min`), + SteinerTreeInGraphs (`Min`) + +**Satisfaction** (`Or` — use `assert_satisfaction_round_trip` or satisfaction variant): +- All other 33 problems + +### Config-space encodings requiring non-trivial `extract_solution` + +| Encoding | Problems | Extraction strategy | +|----------|----------|-------------------| +| **Lehmer code** `[n, n-1, ..., 1]` | FlowShopScheduling, MinimumTardinessSequencing, SequencingToMinimizeWeightedTardiness | Sort jobs by ILP completion times → derive permutation → convert to Lehmer code | +| **Vertex permutation** `vec![n; n]` | HamiltonianPath, OptimalLinearArrangement, ConsecutiveBlockMinimization, AcyclicPartition | One-hot decode: for each position/vertex, find the 1 in the assignment row | +| **Arc permutation** `vec![m; m]` | StackerCrane | Position-assignment decode: for each position, find the selected arc | +| **Injection** `vec![m; k]` | SubgraphIsomorphism, QuadraticAssignment | One-hot decode per source element | +| **Parent array** `vec![n; n]` | RootedTreeStorageAssignment | Decode parent-selection one-hot matrix → parent index per node | +| **Bijection** `vec![n; n]` | IsomorphicSpanningTree | One-hot decode tree-vertex → graph-vertex | +| **Compound** `vec![n; 2*n]` | *(RootedTreeArrangement — deferred)* | — | +| **Binary** `vec![2; ...]` | All others | Direct identity or first-k prefix extraction | +| **Ternary** `vec![3; num_edges]` | RuralPostman | Integer flow variable → clamp to {0,1,2} per edge | + +--- + +## Phase 0: Helper Module + +**File:** `src/rules/ilp_helpers.rs` + +Seven helper functions returning `Vec` (or single `LinearConstraint`): + +```rust +/// 
McCormick linearization: y = x_a * x_b (both binary).
+/// Returns 3 constraints: y ≤ x_a, y ≤ x_b, y ≥ x_a + x_b - 1.
+pub fn mccormick_product(y_idx: usize, x_a: usize, x_b: usize) -> Vec<LinearConstraint>
+
+/// MTZ topological ordering for directed arcs.
+/// For each arc (u→v): o_v - o_u ≥ 1 - M*(x_u + x_v).
+/// When both x_u=0, x_v=0 (both kept): enforces o_v > o_u.
+/// When either x_u=1 or x_v=1 (removed): constraint is slack.
+/// Also emits bound constraints: x_i ≤ 1, 0 ≤ o_i ≤ n-1.
+/// Matches the pattern in minimumfeedbackvertexset_ilp.rs.
+pub fn mtz_ordering(
+    arcs: &[(usize, usize)],
+    n: usize,
+    x_offset: usize,
+    o_offset: usize,
+) -> Vec<LinearConstraint>
+
+/// Flow conservation at each node.
+/// For each node u: Σ_{(u,v)} f_{uv} - Σ_{(v,u)} f_{vu} = demand[u].
+pub fn flow_conservation(
+    arcs: &[(usize, usize)],
+    num_nodes: usize,
+    flow_idx: &dyn Fn(usize) -> usize,
+    demand: &[f64],
+) -> Vec<LinearConstraint>
+
+/// Big-M activation: f ≤ M * y. Single constraint.
+pub fn big_m_activation(f_idx: usize, y_idx: usize, big_m: f64) -> LinearConstraint
+
+/// Absolute value linearization: |a - b| ≤ z.
+/// Returns 2 constraints: a - b ≤ z, b - a ≤ z.
+pub fn abs_diff_le(a_idx: usize, b_idx: usize, z_idx: usize) -> Vec<LinearConstraint>
+
+/// Minimax: z ≥ expr_i for each expression.
+/// Each expr is a list of (var_idx, coeff) terms.
+pub fn minimax_constraints(z_idx: usize, expr_terms: &[Vec<(usize, f64)>]) -> Vec<LinearConstraint>
+
+/// One-hot to index extraction: given n*k binary assignment vars,
+/// decode position p → value v where x_{v,p} = 1.
+/// Shared by all permutation/assignment-based reductions.
+pub fn one_hot_decode(solution: &[usize], num_items: usize, num_slots: usize, var_offset: usize) -> Vec<usize>
+```
+
+The helper module gets its own unit tests verifying constraint correctness.
+
+No new types introduced. Existing Tier 1/2 reductions are **not** refactored — helpers are used only by new Tier 3 code. 
+ +--- + +## Phase 1: Flow-based (9 reductions) + +| Problem | Value | ILP type | Variables | Key constraints | Helpers | Extract | +|---------|-------|----------|-----------|-----------------|---------|---------| +| IntegralFlowHomologousArcs | `Or` | `i32` | Integer f_a per arc | Capacity, conservation, homologous equality, requirement | `flow_conservation` | Direct (f_a values) | +| IntegralFlowWithMultipliers | `Or` | `i32` | Integer f_a per arc | Capacity, modified conservation (multiplier factors), requirement | `flow_conservation` | Direct | +| PathConstrainedNetworkFlow | `Or` | `i32` | Integer f_p per allowed path | Capacity aggregation per arc, flow requirement | — | Direct | +| DisjointConnectingPaths | `Or` | `bool` | Binary f^k_{uv} per commodity per arc | Conservation per commodity, vertex-disjointness (Σ_k ≤ 1 at non-terminals) | `flow_conservation` | Reconstruct edge selection from flow variables | +| LengthBoundedDisjointPaths | `Or` | `i32` | Binary f^k_{uv} + integer hop h^k_v per commodity | Conservation, disjointness, hop count h^k_v ≤ L per commodity | `flow_conservation` | Reconstruct edge selection from flow variables | +| MixedChinesePostman | `Or` | `i32` | Integer traversal t_a + binary orientation d_e | Euler balance (in = out), required edge/arc coverage ≥ 1 | `flow_conservation` | Direct (traversal counts) | +| RuralPostman | `Or` | `i32` | Integer t_e ∈ {0,1,2} per edge (traversal multiplicity) | Required edge coverage (t_e ≥ 1), Euler balance (even degree at each vertex), connectivity via flow, total cost ≤ bound | `flow_conservation`, `big_m_activation` | Direct (t_e values map to `dims() = vec![3; num_edges]`) | +| StackerCrane | `Or` | `i32` | Binary x_{a,k} (arc a at position k) + shortest-path cost auxiliaries | Position-assignment (each position gets one required arc, each arc used once), inter-arc connection cost via precomputed shortest paths, total ≤ bound | `big_m_activation` | One-hot decode → arc permutation (`dims() = 
vec![m; m]`) | +| SteinerTreeInGraphs | `Min` | `bool` | Binary y_e + multi-commodity flow f^t_{uv} | Conservation, capacity linking (same pattern as SteinerTree→ILP); minimize Σ w_e·y_e | `flow_conservation`, `big_m_activation` | Direct (edge selection) | + +--- + +## Phase 2: Scheduling (7 reductions) + +All scheduling problems with Lehmer-code configs share a common extraction pattern: ILP ordering variables → sort to get permutation → convert permutation to Lehmer code. + +| Problem | Value | ILP type | Variables | Key constraints | Helpers | Extract | +|---------|-------|----------|-----------|-----------------|---------|---------| +| FlowShopScheduling | `Or` | `i32` | Binary y_{ij} (job i before j) + integer C_{jm} (completion on machine m) | Machine precedence: C_{j,m+1} ≥ C_{j,m} + p_{j,m+1}; ordering via big-M; makespan ≤ deadline | `big_m_activation` | Completion times → sort → Lehmer code | +| MinimumTardinessSequencing | `Min` | `i32` | Binary y_{ij} + integer C_j | Ordering via big-M, precedence constraints; objective: minimize Σ tardy_j (binary indicators for C_j > d_j) | `big_m_activation` | Completion times → sort → Lehmer code | +| ResourceConstrainedScheduling | `Or` | `bool` | Binary x_{jt} (job j starts at time t) | One start per job, precedence, resource capacity per period, deadline | — | Time-indexed decode → Lehmer code | +| SequencingToMinimizeMaximumCumulativeCost | `Or` | `i32` | Binary y_{ij} + integer C_j | Ordering via big-M; cumulative cost ≤ bound (feasibility, not minimax) | `big_m_activation` | Completion times → sort → Lehmer code | +| SequencingToMinimizeWeightedTardiness | `Or` | `i32` | Binary y_{ij} + integer C_j | Ordering via big-M; Σ w_j * max(0, C_j - d_j) ≤ bound (feasibility) | `big_m_activation` | Completion times → sort → Lehmer code | +| SequencingWithReleaseTimesAndDeadlines | `Or` | `bool` | Binary x_{jt} (job j at time t) | Release: no start before r_j, deadline: finish by d_j, non-overlap | — | Time-indexed 
decode → Lehmer code | +| TimetableDesign | `Or` | `bool` | Binary x_{c,t,h} (craftsman c, task t, period h) | Craftsman exclusivity, task exclusivity, requirement satisfaction | — | Direct (binary) | + +--- + +## Phase 3: Position/Assignment + McCormick (6 reductions) + +| Problem | Value | ILP type | Variables | Key constraints | Helpers | Extract | +|---------|-------|----------|-----------|-----------------|---------|---------| +| HamiltonianPath | `Or` | `bool` | Binary x_{v,k} (vertex v at position k) | Row/column assignment, adjacency: McCormick for consecutive pairs | `mccormick_product` | One-hot decode → vertex permutation (`dims() = vec![n; n]`) | +| BottleneckTravelingSalesman | `Min` | `i32` | Binary x_{v,k} + integer z (bottleneck) | TSP assignment + z ≥ w(u,v) for each used edge (McCormick); minimize z | `mccormick_product`, `minimax_constraints` | Edge selection from assignment matrix (`dims() = vec![2; num_edges]`) | +| LongestCircuit | `Or` | `bool` | Binary y_e (edge selection) + binary s_v (vertex on circuit) + flow vars | Degree: Σ_{e∋v} y_e = 2·s_v; size: Σ y_e ≥ 3; connectivity via root-flow on selected edges; length: Σ w_e·y_e ≥ B | `flow_conservation` | Direct (y_e binary edge vector, `dims() = vec![2; num_edges]`) | +| QuadraticAssignment | `Min` | `bool` | Binary x_{i,p} (facility i at location p) | Assignment + McCormick for x_{i,p}·x_{j,q}; minimize Σ C_{ij}·D_{f(i),f(j)} | `mccormick_product` | One-hot decode → facility-to-location injection (`dims() = vec![num_locations; num_facilities]`) | +| OptimalLinearArrangement | `Or` | `i32` | Binary x_{v,p} + integer z_{uv} per edge | Assignment + z_{uv} ≥ |π(u)-π(v)| via abs_diff; Σ z_{uv} ≤ bound | `abs_diff_le` | One-hot decode → vertex-to-position (`dims() = vec![n; n]`) | +| SubgraphIsomorphism | `Or` | `bool` | Binary x_{v,u} (pattern v → host u) | Injection (each pattern vertex maps to exactly 1 host vertex, each host vertex used ≤ 1 time) + edge preservation: for each pattern edge 
(v,w) and host non-edge (u,u'), x_{v,u} + x_{w,u'} ≤ 1 (no McCormick needed) | — | One-hot decode → injection (`dims() = vec![n_host; n_pattern]`) | + +--- + +## Phase 4: Graph structure (7 reductions) + +| Problem | Value | ILP type | Variables | Key constraints | Helpers | Extract | +|---------|-------|----------|-----------|-----------------|---------|---------| +| AcyclicPartition | `Or` | `i32` | Binary x_{v,c} (vertex v in class c) + integer o_c (class ordering) + binary s_{uv,c} (same-class indicators per arc per class) | Assignment (Σ_c x_{v,c} = 1); weight bound per class; cost bound on inter-class arcs; same-class: s_{uv,c} via McCormick on x_{u,c}·x_{v,c}; DAG: for each arc (u→v), o_v_class - o_u_class ≥ 1 - M·Σ_c s_{uv,c} | `mccormick_product` | One-hot decode x_{v,c} → partition label (`dims() = vec![n; n]`) | +| BalancedCompleteBipartiteSubgraph | `Or` | `bool` | Binary x_v (side A), y_v (side B) | Balance: Σx = Σy = k; completeness: McCormick for x_u·y_v on non-edges | `mccormick_product` | Direct (binary) | +| BicliqueCover | `Or` | `bool` | Binary z_{v,j} (vertex v in biclique j) | Biclique validity via McCormick, edge coverage | `mccormick_product` | Direct (binary) | +| BiconnectivityAugmentation | `Or` | `i32` | Binary y_e (add edge e) + flow vars for 2-vertex-connectivity | For each vertex v: removing v must leave graph connected. 
Formulated via flow: for each vertex v and each pair (s,t) of v's neighbors, unit flow from s to t avoiding v, through original + selected edges | `flow_conservation`, `big_m_activation` | Direct (binary edge selection, `dims() = vec![2; num_potential_edges]`) | +| BoundedComponentSpanningForest | `Or` | `i32` | Binary y_e (edge in forest) + integer label l_v (component root ID) + flow vars | Forest structure (no cycles via MTZ on directed version); component assignment via labels; per-component total vertex **weight** ≤ B (not size) | `flow_conservation`, `mtz_ordering` | Edge selection → component label decode (`dims() = vec![2; num_edges]` or label-based) | +| MinimumCutIntoBoundedSets | `Or` | `bool` | Binary x_v (partition side) + binary y_e (cut edge) | Balance: L ≤ Σx_v ≤ U; cut linking: y_e ≥ x_u - x_v and y_e ≥ x_v - x_u; Σ w_e·y_e ≤ bound | — | Direct (binary partition) | +| StrongConnectivityAugmentation | `Or` | `i32` | Binary y_a (add arc) + multi-commodity flow | For each ordered pair (s,t): unit flow from s to t through original + selected arcs | `flow_conservation`, `big_m_activation` | Direct (binary arc selection) | + +--- + +## Phase 5: Matrix/encoding (5 reductions) + +| Problem | Value | ILP type | Variables | Key constraints | Helpers | Extract | +|---------|-------|----------|-----------|-----------------|---------|---------| +| BMF | `Min` | `bool` | Binary a_{ik}, b_{kj} + auxiliary p_{ijk} (McCormick for a_{ik}·b_{kj}) + binary w_{ij} (reconstructed entry) | p_{ijk} via McCormick; w_{ij} ≥ p_{ijk} for all k (OR-of-ANDs); w_{ij} ≤ Σ_k p_{ijk}; minimize Σ |A_{ij} - w_{ij}| | `mccormick_product` | Direct (binary factor matrices) | +| ConsecutiveBlockMinimization | `Or` | `bool` | Binary x_{c,p} (column c at position p) + binary b_{r,p} (block start at row r, position p) | Column permutation (one-hot assignment); block detection: b_{r,p} activated when row r transitions 0→1 at position p; Σ blocks ≤ bound | — | One-hot decode → column 
permutation (`dims() = vec![num_cols; num_cols]`) | +| ConsecutiveOnesMatrixAugmentation | `Or` | `bool` | Binary x_{c,p} (column permutation) + binary f_{r,j} (flip entry r,j) | Permutation + consecutive-ones property after flips; minimize/bound total flips | — | One-hot decode → column permutation (`dims() = vec![num_cols; num_cols]`) | +| ConsecutiveOnesSubmatrix | `Or` | `bool` | Binary s_j (select column j) + auxiliary binary x_{c,p} (column permutation of selected columns) | Exactly K columns selected (Σ s_j = K); permutation of selected columns; C1P enforced on every row within selected+permuted columns. s_j at indices 0..num_cols (extracted directly). x_{c,p} are auxiliary. | — | Direct (s_j binary selection, `dims() = vec![2; num_cols]`) | +| SparseMatrixCompression | `Or` | `bool` | Binary x_{i,g} (row i in group g) | Row-to-group assignment (one group per row); compatibility: conflicting rows not in same group; num groups ≤ K | — | One-hot decode → group assignment | + +--- + +## Phase 6: Sequence/misc (5 reductions) + +| Problem | Value | ILP type | Variables | Key constraints | Helpers | Extract | +|---------|-------|----------|-----------|-----------------|---------|---------| +| ShortestCommonSupersequence | `Or` | `bool` | Binary x_{p,a} (position p has symbol a) + match vars m_{s,j,p} | Symbol assignment + monotone matching for each input string; total length ≤ bound | — | Symbol sequence extraction | +| StringToStringCorrection | `Or` | `bool` | Binary d_{i,j,op} (edit operation at alignment point) | Alignment grid + operation exclusivity + cost ≤ bound | — | Direct (binary operation selection) | +| PaintShop | `Min` | `bool` | Binary x_i (color for car i's first occurrence) + binary c_p (color-change indicator at position p) | Pairing: second occurrence gets 1-x_i; c_p ≥ color_p - color_{p-1} and c_p ≥ color_{p-1} - color_p; minimize Σ c_p | — | Direct (x_i binary, `dims() = vec![2; num_cars]`) | +| IsomorphicSpanningTree | `Or` | `bool` | Binary 
x_{u,v} (tree vertex u maps to graph vertex v) | Bijection: one-hot per tree vertex and per graph vertex; edge preservation: for each tree edge {u,w} and graph non-edge {v,z}, x_{u,v} + x_{w,z} ≤ 1 (no McCormick or flow needed — bijection preserving tree edges automatically produces a spanning tree) | — | One-hot decode → bijection (`dims() = vec![n; n]`) | +| RootedTreeStorageAssignment | `Or` | `i32` | Binary p_{v,u} (node v's parent is u) + integer depth d_v | Tree structure: each non-root node has exactly one parent, acyclicity via depth ordering (d_v > d_u if u is parent of v), connectivity; per-subset path cost ≤ bound | — | One-hot parent decode → parent array (`dims() = vec![n; n]`) | + +--- + +## Testing Strategy + +- Each reduction gets one `test__to_ilp_closed_loop` test +- **Optimization problems** (BottleneckTSP, MinTardiness, QAP, BMF, PaintShop): use `assert_optimization_round_trip_from_optimization_target` +- **Satisfaction problems** (all others): use the satisfaction round-trip variant +- Test instances should be small enough for brute-force cross-check (n ≤ 6-8) +- All tests in `src/unit_tests/rules/_ilp.rs` +- Helper module gets standalone unit tests in `src/unit_tests/rules/ilp_helpers.rs` + +## Integration Checklist (per reduction) + +Each new reduction file requires: +1. `#[cfg(feature = "ilp-solver")] pub(crate) mod _ilp;` in `src/rules/mod.rs` +2. `specs.extend(_ilp::canonical_rule_example_specs());` in the `#[cfg(feature = "ilp-solver")]` block of `canonical_rule_example_specs()` in `src/rules/mod.rs` +3. `#[reduction(overhead = { ... })]` with verified overhead expressions referencing source-type getter methods +4. 
Closed-loop test + paper entry + +## Paper + +Each reduction gets a `reduction-rule` entry in `docs/paper/reductions.typ` with: +- Rule statement describing the formulation +- Proof sketch (variable layout, constraint count, correctness argument) +- Example flag set to `true` where pedagogically useful + +## Post-merge + +- Update #762 body: move 39 problems from Tier 3 to Tier 1 +- Close #728 (TimetableDesign→ILP) and #733 (IntegralFlowHomologousArcs→ILP) +- File separate issues for deferred problems: PartialFeedbackEdgeSet→ILP, RootedTreeArrangement→ILP From cd96ca3daa2a589d9b5dc6315d4a01966dfe4edf Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 24 Mar 2026 15:50:21 +0800 Subject: [PATCH 2/6] feat: add shared ILP linearization helpers (McCormick, MTZ, flow, big-M, abs-diff, minimax, one-hot) Co-Authored-By: Claude Opus 4.6 --- src/rules/ilp_helpers.rs | 196 ++++++++++++++++++++++++++++ src/rules/mod.rs | 2 + src/unit_tests/rules/ilp_helpers.rs | 177 +++++++++++++++++++++++++ 3 files changed, 375 insertions(+) create mode 100644 src/rules/ilp_helpers.rs create mode 100644 src/unit_tests/rules/ilp_helpers.rs diff --git a/src/rules/ilp_helpers.rs b/src/rules/ilp_helpers.rs new file mode 100644 index 00000000..826b31f7 --- /dev/null +++ b/src/rules/ilp_helpers.rs @@ -0,0 +1,196 @@ +//! Shared ILP linearization helpers for Tier 3 reductions. +//! +//! These functions generate `LinearConstraint` sets for common ILP patterns: +//! McCormick products, MTZ orderings, flow conservation, big-M activation, +//! absolute-value differentials, minimax bounds, and one-hot decoding. + +use crate::models::algebraic::LinearConstraint; + +/// McCormick linearization: `y = x_a * x_b` (both binary). +/// +/// Returns 3 constraints: `y ≤ x_a`, `y ≤ x_b`, `y ≥ x_a + x_b - 1`. 
+pub fn mccormick_product(y_idx: usize, x_a: usize, x_b: usize) -> Vec {
+    // NOTE(review): the return type renders here as a bare `Vec` — presumably
+    // `Vec<LinearConstraint>` with the generic argument lost in extraction;
+    // confirm against the repository source.
+    vec![
+        // y <= x_a
+        LinearConstraint::le(vec![(y_idx, 1.0), (x_a, -1.0)], 0.0),
+        // y <= x_b
+        LinearConstraint::le(vec![(y_idx, 1.0), (x_b, -1.0)], 0.0),
+        // y >= x_a + x_b - 1 => x_a + x_b - y <= 1
+        LinearConstraint::le(vec![(x_a, 1.0), (x_b, 1.0), (y_idx, -1.0)], 1.0),
+    ]
+}
+
+/// MTZ topological ordering for directed arcs.
+///
+/// For each arc `(u → v)` the emitted constraint is
+/// `o_v - o_u ≥ 1 - M*x_u - M*x_v`
+/// (x=0 means kept, x=1 means removed), so the ordering `o_v > o_u` is
+/// enforced exactly when both endpoints are kept; a removed endpoint
+/// relaxes the constraint via its big-M term.
+/// Also emits bound constraints: `0 ≤ o_i ≤ n-1`.
+///
+/// `x_offset`: start index for removal indicator variables.
+/// `o_offset`: start index for ordering variables.
+pub fn mtz_ordering(
+    arcs: &[(usize, usize)],
+    n: usize,
+    x_offset: usize,
+    o_offset: usize,
+) -> Vec {
+    // M = n suffices: under the bounds below, o_v - o_u >= -(n-1), so a
+    // single big-M term already drives the constraint below its reachable
+    // minimum whenever either endpoint is removed.
+    let big_m = n as f64;
+    let mut constraints = Vec::new();
+
+    for &(u, v) in arcs {
+        // o_v - o_u + M*x_u + M*x_v >= 1
+        constraints.push(LinearConstraint::ge(
+            vec![
+                (o_offset + v, 1.0),
+                (o_offset + u, -1.0),
+                (x_offset + u, big_m),
+                (x_offset + v, big_m),
+            ],
+            1.0,
+        ));
+    }
+
+    // Bound constraints: 0 <= o_i <= n-1
+    for i in 0..n {
+        constraints.push(LinearConstraint::le(
+            vec![(o_offset + i, 1.0)],
+            (n - 1) as f64,
+        ));
+        constraints.push(LinearConstraint::ge(vec![(o_offset + i, 1.0)], 0.0));
+    }
+
+    constraints
+}
+
+/// Flow conservation at each node.
+///
+/// For each node `u`: `Σ_{(u,v)} f_{uv} - Σ_{(v,u)} f_{vu} = demand[u]`.
+///
+/// `flow_idx` maps an arc index to the ILP variable index for that arc's flow.
+pub fn flow_conservation(
+    arcs: &[(usize, usize)],
+    num_nodes: usize,
+    flow_idx: &dyn Fn(usize) -> usize,
+    demand: &[f64],
+) -> Vec {
+    // Sign convention: demand[u] is the required NET OUTFLOW at u —
+    // positive at sources, negative at sinks, zero at transit nodes
+    // (outgoing arcs enter the row with +1.0, incoming with -1.0 below).
+    let mut constraints = Vec::with_capacity(num_nodes);
+    for node in 0..num_nodes {
+        let mut terms = Vec::new();
+        for (arc_idx, &(u, v)) in arcs.iter().enumerate() {
+            if u == node {
+                terms.push((flow_idx(arc_idx), 1.0)); // outgoing
+            }
+            if v == node {
+                terms.push((flow_idx(arc_idx), -1.0)); // incoming
+            }
+        }
+        constraints.push(LinearConstraint::eq(terms, demand[node]));
+    }
+    constraints
+}
+
+/// Big-M activation: `f ≤ M * y`. Single constraint.
+///
+/// NOTE(review): this only caps `f` from above; `y = 0` forces `f = 0`
+/// only if `f ≥ 0` is enforced elsewhere in the model — confirm callers
+/// emit that lower bound.
+pub fn big_m_activation(f_idx: usize, y_idx: usize, big_m: f64) -> LinearConstraint {
+    // f - M*y <= 0
+    LinearConstraint::le(vec![(f_idx, 1.0), (y_idx, -big_m)], 0.0)
+}
+
+/// Absolute value linearization: `|a - b| ≤ z`.
+///
+/// Returns 2 constraints: `a - b ≤ z`, `b - a ≤ z`.
+pub fn abs_diff_le(a_idx: usize, b_idx: usize, z_idx: usize) -> Vec {
+    vec![
+        // a - b - z <= 0
+        LinearConstraint::le(vec![(a_idx, 1.0), (b_idx, -1.0), (z_idx, -1.0)], 0.0),
+        // b - a - z <= 0
+        LinearConstraint::le(vec![(b_idx, 1.0), (a_idx, -1.0), (z_idx, -1.0)], 0.0),
+    ]
+}
+
+/// Minimax: `z ≥ expr_i` for each expression.
+///
+/// Each `expr` is a list of `(var_idx, coeff)` terms representing a linear expression.
+pub fn minimax_constraints(z_idx: usize, expr_terms: &[Vec<(usize, f64)>]) -> Vec {
+    expr_terms
+        .iter()
+        .map(|terms| {
+            // z >= Σ coeff_j * x_j => z - Σ coeff_j * x_j >= 0
+            let mut constraint_terms = vec![(z_idx, 1.0)];
+            for &(var, coeff) in terms {
+                constraint_terms.push((var, -coeff));
+            }
+            LinearConstraint::ge(constraint_terms, 0.0)
+        })
+        .collect()
+}
+
+/// One-hot to index extraction.
+///
+/// Given `num_items * num_slots` binary assignment variables starting at `var_offset`,
+/// decode each slot `p` → value `v` where `x_{v*num_slots + p} = 1`.
+///
+/// Layout: variable at index `var_offset + v * num_slots + p` represents
+/// "item v is assigned to slot p".
+pub fn one_hot_decode(
+    solution: &[usize],
+    num_items: usize,
+    num_slots: usize,
+    var_offset: usize,
+) -> Vec {
+    (0..num_slots)
+        .map(|p| {
+            (0..num_items)
+                // A slot with no set indicator silently decodes to item 0.
+                // NOTE(review): that is indistinguishable from a genuine
+                // assignment of item 0 — safe only when the one-hot
+                // assignment constraints are part of the solved model.
+                .find(|&v| solution[var_offset + v * num_slots + p] == 1)
+                .unwrap_or(0)
+        })
+        .collect()
+}
+
+/// Convert a permutation to Lehmer code.
+///
+/// Given a permutation of `[0..n)`, returns the Lehmer code representation
+/// where each element counts the number of smaller elements to its right.
+pub fn permutation_to_lehmer(perm: &[usize]) -> Vec {
+    let n = perm.len();
+    let mut lehmer = Vec::with_capacity(n);
+    for i in 0..n {
+        // Inversions contributed by position i: entries after i that are
+        // smaller than perm[i].
+        let count = (i + 1..n).filter(|&j| perm[j] < perm[i]).count();
+        lehmer.push(count);
+    }
+    lehmer
+}
+
+/// One-hot assignment constraints: each item assigned to exactly one slot,
+/// each slot assigned at most one item.
+///
+/// Returns constraints for a `num_items × num_slots` assignment matrix
+/// starting at `var_offset`.
+pub fn one_hot_assignment_constraints( + num_items: usize, + num_slots: usize, + var_offset: usize, +) -> Vec { + let mut constraints = Vec::new(); + + // Each item assigned to exactly one slot + for v in 0..num_items { + let terms: Vec<(usize, f64)> = (0..num_slots) + .map(|p| (var_offset + v * num_slots + p, 1.0)) + .collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Each slot assigned at most one item + for p in 0..num_slots { + let terms: Vec<(usize, f64)> = (0..num_items) + .map(|v| (var_offset + v * num_slots + p, 1.0)) + .collect(); + constraints.push(LinearConstraint::le(terms, 1.0)); + } + + constraints +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ilp_helpers.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 42fa8019..e6afb99a 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -49,6 +49,8 @@ pub(crate) mod travelingsalesman_qubo; pub mod unitdiskmapping; +#[cfg(feature = "ilp-solver")] +pub(crate) mod ilp_helpers; #[cfg(feature = "ilp-solver")] pub(crate) mod binpacking_ilp; #[cfg(feature = "ilp-solver")] diff --git a/src/unit_tests/rules/ilp_helpers.rs b/src/unit_tests/rules/ilp_helpers.rs new file mode 100644 index 00000000..0036098c --- /dev/null +++ b/src/unit_tests/rules/ilp_helpers.rs @@ -0,0 +1,177 @@ +use super::*; +use crate::models::algebraic::{Comparison, LinearConstraint}; + +#[test] +fn test_mccormick_product_constraints() { + let constraints = mccormick_product(2, 0, 1); + assert_eq!(constraints.len(), 3); + + // y <= x_a: y - x_a <= 0 + assert_eq!(constraints[0].cmp, Comparison::Le); + assert_eq!(constraints[0].rhs, 0.0); + assert_eq!(constraints[0].terms, vec![(2, 1.0), (0, -1.0)]); + + // y <= x_b: y - x_b <= 0 + assert_eq!(constraints[1].cmp, Comparison::Le); + assert_eq!(constraints[1].rhs, 0.0); + assert_eq!(constraints[1].terms, vec![(2, 1.0), (1, -1.0)]); + + // y >= x_a + x_b - 1: x_a + x_b - y <= 1 + assert_eq!(constraints[2].cmp, Comparison::Le); + 
assert_eq!(constraints[2].rhs, 1.0); + assert_eq!(constraints[2].terms, vec![(0, 1.0), (1, 1.0), (2, -1.0)]); +} + +#[test] +fn test_mccormick_product_satisfies_truth_table() { + let constraints = mccormick_product(2, 0, 1); + // (x_a, x_b, y) -> product: y = x_a * x_b + let cases = vec![ + (vec![0, 0, 0], true), // 0*0=0 + (vec![0, 1, 0], true), // 0*1=0 + (vec![1, 0, 0], true), // 1*0=0 + (vec![1, 1, 1], true), // 1*1=1 + (vec![0, 0, 1], false), // y=1 but 0*0=0 + (vec![1, 1, 0], false), // y=0 but 1*1=1 + ]; + for (vals, expected) in cases { + let i64_vals: Vec = vals.iter().map(|&v| v as i64).collect(); + let all_satisfied = constraints.iter().all(|c| c.is_satisfied(&i64_vals)); + assert_eq!(all_satisfied, expected, "case {:?}", vals); + } +} + +#[test] +fn test_mtz_ordering_creates_arc_and_bound_constraints() { + let arcs = vec![(0, 1), (1, 2)]; + let n = 3; + let constraints = mtz_ordering(&arcs, n, 0, 3); + // 2 arc constraints + 2*3 bound constraints = 8 + assert_eq!(constraints.len(), 8); +} + +#[test] +fn test_flow_conservation_simple_path() { + // Simple path: 0 -> 1 -> 2, demand: +1 at source(0), -1 at sink(2), 0 at transit(1) + let arcs = vec![(0, 1), (1, 2)]; + let demand = vec![1.0, 0.0, -1.0]; + let constraints = flow_conservation(&arcs, 3, &|i| i, &demand); + assert_eq!(constraints.len(), 3); + + // Node 0: f_01 = 1 + assert_eq!(constraints[0].cmp, Comparison::Eq); + assert_eq!(constraints[0].rhs, 1.0); + + // Node 1: f_12 - f_01 = 0 + assert_eq!(constraints[1].cmp, Comparison::Eq); + assert_eq!(constraints[1].rhs, 0.0); + + // Node 2: -f_12 = -1 + assert_eq!(constraints[2].cmp, Comparison::Eq); + assert_eq!(constraints[2].rhs, -1.0); + + // Solution: f_01 = 1, f_12 = 1 + let values = vec![1i64, 1]; + assert!(constraints.iter().all(|c| c.is_satisfied(&values))); +} + +#[test] +fn test_big_m_activation() { + let c = big_m_activation(0, 1, 10.0); + assert_eq!(c.cmp, Comparison::Le); + // f - 10*y <= 0 + assert_eq!(c.terms, vec![(0, 1.0), (1, 
-10.0)]); + assert_eq!(c.rhs, 0.0); + + // y=1, f=5: 5 - 10 = -5 <= 0 ✓ + assert!(c.is_satisfied(&[5, 1])); + // y=0, f=5: 5 - 0 = 5 > 0 ✗ + assert!(!c.is_satisfied(&[5, 0])); + // y=1, f=10: 10 - 10 = 0 <= 0 ✓ + assert!(c.is_satisfied(&[10, 1])); +} + +#[test] +fn test_abs_diff_le() { + let constraints = abs_diff_le(0, 1, 2); + assert_eq!(constraints.len(), 2); + + // |a - b| <= z + // a=3, b=1, z=2: |3-1|=2 <= 2 ✓ + assert!(constraints.iter().all(|c| c.is_satisfied(&[3, 1, 2]))); + // a=3, b=1, z=1: |3-1|=2 > 1 ✗ + assert!(!constraints.iter().all(|c| c.is_satisfied(&[3, 1, 1]))); + // a=1, b=3, z=2: |1-3|=2 <= 2 ✓ + assert!(constraints.iter().all(|c| c.is_satisfied(&[1, 3, 2]))); +} + +#[test] +fn test_minimax_constraints() { + // z >= x_0, z >= x_1 + let exprs = vec![vec![(0, 1.0)], vec![(1, 1.0)]]; + let constraints = minimax_constraints(2, &exprs); + assert_eq!(constraints.len(), 2); + + // z=5, x_0=3, x_1=4: z >= max(3,4) ✓ + assert!(constraints.iter().all(|c| c.is_satisfied(&[3, 4, 5]))); + // z=3, x_0=3, x_1=4: z < max(3,4) ✗ + assert!(!constraints.iter().all(|c| c.is_satisfied(&[3, 4, 3]))); +} + +#[test] +fn test_one_hot_decode_permutation() { + // 3x3 assignment: item 0 at slot 2, item 1 at slot 0, item 2 at slot 1 + // Layout: x_{v*3+p} + let mut solution = vec![0usize; 9]; + solution[0 * 3 + 2] = 1; // item 0 -> slot 2 + solution[1 * 3 + 0] = 1; // item 1 -> slot 0 + solution[2 * 3 + 1] = 1; // item 2 -> slot 1 + let decoded = one_hot_decode(&solution, 3, 3, 0); + assert_eq!(decoded, vec![1, 2, 0]); // slot 0 gets item 1, slot 1 gets item 2, slot 2 gets item 0 +} + +#[test] +fn test_one_hot_decode_with_offset() { + // Same as above but with offset=5 + let mut solution = vec![0usize; 14]; + solution[5 + 0 * 3 + 2] = 1; + solution[5 + 1 * 3 + 0] = 1; + solution[5 + 2 * 3 + 1] = 1; + let decoded = one_hot_decode(&solution, 3, 3, 5); + assert_eq!(decoded, vec![1, 2, 0]); +} + +#[test] +fn test_permutation_to_lehmer() { + // Identity permutation [0,1,2] -> 
Lehmer [0,0,0] + assert_eq!(permutation_to_lehmer(&[0, 1, 2]), vec![0, 0, 0]); + // Reverse [2,1,0] -> Lehmer [2,1,0] + assert_eq!(permutation_to_lehmer(&[2, 1, 0]), vec![2, 1, 0]); + // [1,0,2] -> Lehmer [1,0,0] + assert_eq!(permutation_to_lehmer(&[1, 0, 2]), vec![1, 0, 0]); +} + +#[test] +fn test_one_hot_assignment_constraints() { + let constraints = one_hot_assignment_constraints(3, 3, 0); + // 3 "each item to one slot" + 3 "each slot at most one item" = 6 + assert_eq!(constraints.len(), 6); + + // First 3 are equality (item assignment) + for c in &constraints[..3] { + assert_eq!(c.cmp, Comparison::Eq); + assert_eq!(c.rhs, 1.0); + } + // Last 3 are le (slot capacity) + for c in &constraints[3..] { + assert_eq!(c.cmp, Comparison::Le); + assert_eq!(c.rhs, 1.0); + } + + // Valid permutation: item 0->slot 0, item 1->slot 1, item 2->slot 2 + let mut solution = vec![0i64; 9]; + solution[0] = 1; // item 0 -> slot 0 + solution[4] = 1; // item 1 -> slot 1 + solution[8] = 1; // item 2 -> slot 2 + assert!(constraints.iter().all(|c| c.is_satisfied(&solution))); +} From 738c179948285985fedf36b88a86de2970d29b58 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 24 Mar 2026 17:55:11 +0800 Subject: [PATCH 3/6] feat: add 39 Tier 3 ILP reductions + shared linearization helpers Implements all remaining Tier 3 ILP reductions covering graph, scheduling, flow, sequencing, matrix, and miscellaneous NP-hard problems. Each reduction includes unit tests with closed-loop round-trip verification and canonical example specs for the example database. 
Co-Authored-By: Claude Opus 4.6 --- src/rules/acyclicpartition_ilp.rs | 195 ++++++++ .../balancedcompletebipartitesubgraph_ilp.rs | 100 ++++ src/rules/bicliquecover_ilp.rs | 125 +++++ src/rules/biconnectivityaugmentation_ilp.rs | 229 +++++++++ src/rules/bmf_ilp.rs | 151 ++++++ src/rules/bottlenecktravelingsalesman_ilp.rs | 204 ++++++++ .../boundedcomponentspanningforest_ilp.rs | 220 +++++++++ src/rules/consecutiveblockminimization_ilp.rs | 187 ++++++++ .../consecutiveonesmatrixaugmentation_ilp.rs | 205 ++++++++ src/rules/consecutiveonessubmatrix_ilp.rs | 228 +++++++++ src/rules/disjointconnectingpaths_ilp.rs | 202 ++++++++ src/rules/flowshopscheduling_ilp.rs | 235 ++++++++++ src/rules/hamiltonianpath_ilp.rs | 136 ++++++ src/rules/ilp_helpers.rs | 8 +- src/rules/integralflowhomologousarcs_ilp.rs | 120 +++++ src/rules/integralflowwithmultipliers_ilp.rs | 119 +++++ src/rules/isomorphicspanningtree_ilp.rs | 116 +++++ src/rules/lengthboundeddisjointpaths_ilp.rs | 226 +++++++++ src/rules/longestcircuit_ilp.rs | 184 ++++++++ src/rules/minimumcutintoboundedsets_ilp.rs | 126 +++++ src/rules/minimumtardinesssequencing_ilp.rs | 130 ++++++ src/rules/mixedchinesepostman_ilp.rs | 405 ++++++++++++++++ src/rules/mod.rs | 145 +++++- src/rules/optimallineararrangement_ilp.rs | 163 +++++++ src/rules/paintshop_ilp.rs | 147 ++++++ src/rules/pathconstrainednetworkflow_ilp.rs | 102 ++++ src/rules/quadraticassignment_ilp.rs | 146 ++++++ .../resourceconstrainedscheduling_ilp.rs | 127 +++++ src/rules/rootedtreestorageassignment_ilp.rs | 440 ++++++++++++++++++ src/rules/ruralpostman_ilp.rs | 251 ++++++++++ ...cingtominimizemaximumcumulativecost_ilp.rs | 125 +++++ ...quencingtominimizeweightedtardiness_ilp.rs | 185 ++++++++ ...uencingwithreleasetimesanddeadlines_ilp.rs | 170 +++++++ src/rules/shortestcommonsupersequence_ilp.rs | 157 +++++++ src/rules/sparsematrixcompression_ilp.rs | 140 ++++++ src/rules/stackercrane_ilp.rs | 241 ++++++++++ src/rules/steinertreeingraphs_ilp.rs | 160 +++++++ 
src/rules/stringtostringcorrection_ilp.rs | 408 ++++++++++++++++ .../strongconnectivityaugmentation_ilp.rs | 220 +++++++++ src/rules/subgraphisomorphism_ilp.rs | 131 ++++++ src/rules/timetabledesign_ilp.rs | 131 ++++++ src/unit_tests/rules/acyclicpartition_ilp.rs | 80 ++++ .../balancedcompletebipartitesubgraph_ilp.rs | 60 +++ src/unit_tests/rules/bicliquecover_ilp.rs | 63 +++ .../rules/biconnectivityaugmentation_ilp.rs | 79 ++++ src/unit_tests/rules/bmf_ilp.rs | 58 +++ .../rules/bottlenecktravelingsalesman_ilp.rs | 98 ++++ .../boundedcomponentspanningforest_ilp.rs | 85 ++++ .../rules/consecutiveblockminimization_ilp.rs | 64 +++ .../consecutiveonesmatrixaugmentation_ilp.rs | 71 +++ .../rules/consecutiveonessubmatrix_ilp.rs | 88 ++++ .../rules/disjointconnectingpaths_ilp.rs | 22 + .../rules/flowshopscheduling_ilp.rs | 67 +++ src/unit_tests/rules/hamiltonianpath_ilp.rs | 88 ++++ src/unit_tests/rules/ilp_helpers.rs | 14 +- .../rules/integralflowhomologousarcs_ilp.rs | 32 ++ .../rules/integralflowwithmultipliers_ilp.rs | 31 ++ .../rules/isomorphicspanningtree_ilp.rs | 73 +++ .../rules/lengthboundeddisjointpaths_ilp.rs | 23 + src/unit_tests/rules/longestcircuit_ilp.rs | 111 +++++ .../rules/minimumcutintoboundedsets_ilp.rs | 70 +++ .../rules/minimumtardinesssequencing_ilp.rs | 64 +++ .../rules/mixedchinesepostman_ilp.rs | 29 ++ .../rules/optimallineararrangement_ilp.rs | 97 ++++ src/unit_tests/rules/paintshop_ilp.rs | 63 +++ .../rules/pathconstrainednetworkflow_ilp.rs | 31 ++ .../rules/quadraticassignment_ilp.rs | 107 +++++ .../resourceconstrainedscheduling_ilp.rs | 71 +++ .../rules/rootedtreestorageassignment_ilp.rs | 97 ++++ src/unit_tests/rules/ruralpostman_ilp.rs | 29 ++ ...cingtominimizemaximumcumulativecost_ilp.rs | 59 +++ ...quencingtominimizeweightedtardiness_ilp.rs | 66 +++ ...uencingwithreleasetimesanddeadlines_ilp.rs | 59 +++ .../rules/shortestcommonsupersequence_ilp.rs | 61 +++ .../rules/sparsematrixcompression_ilp.rs | 79 ++++ 
src/unit_tests/rules/stackercrane_ilp.rs | 23 + .../rules/steinertreeingraphs_ilp.rs | 23 + .../rules/stringtostringcorrection_ilp.rs | 79 ++++ .../strongconnectivityaugmentation_ilp.rs | 75 +++ .../rules/subgraphisomorphism_ilp.rs | 96 ++++ src/unit_tests/rules/timetabledesign_ilp.rs | 83 ++++ 81 files changed, 9986 insertions(+), 22 deletions(-) create mode 100644 src/rules/acyclicpartition_ilp.rs create mode 100644 src/rules/balancedcompletebipartitesubgraph_ilp.rs create mode 100644 src/rules/bicliquecover_ilp.rs create mode 100644 src/rules/biconnectivityaugmentation_ilp.rs create mode 100644 src/rules/bmf_ilp.rs create mode 100644 src/rules/bottlenecktravelingsalesman_ilp.rs create mode 100644 src/rules/boundedcomponentspanningforest_ilp.rs create mode 100644 src/rules/consecutiveblockminimization_ilp.rs create mode 100644 src/rules/consecutiveonesmatrixaugmentation_ilp.rs create mode 100644 src/rules/consecutiveonessubmatrix_ilp.rs create mode 100644 src/rules/disjointconnectingpaths_ilp.rs create mode 100644 src/rules/flowshopscheduling_ilp.rs create mode 100644 src/rules/hamiltonianpath_ilp.rs create mode 100644 src/rules/integralflowhomologousarcs_ilp.rs create mode 100644 src/rules/integralflowwithmultipliers_ilp.rs create mode 100644 src/rules/isomorphicspanningtree_ilp.rs create mode 100644 src/rules/lengthboundeddisjointpaths_ilp.rs create mode 100644 src/rules/longestcircuit_ilp.rs create mode 100644 src/rules/minimumcutintoboundedsets_ilp.rs create mode 100644 src/rules/minimumtardinesssequencing_ilp.rs create mode 100644 src/rules/mixedchinesepostman_ilp.rs create mode 100644 src/rules/optimallineararrangement_ilp.rs create mode 100644 src/rules/paintshop_ilp.rs create mode 100644 src/rules/pathconstrainednetworkflow_ilp.rs create mode 100644 src/rules/quadraticassignment_ilp.rs create mode 100644 src/rules/resourceconstrainedscheduling_ilp.rs create mode 100644 src/rules/rootedtreestorageassignment_ilp.rs create mode 100644 
src/rules/ruralpostman_ilp.rs create mode 100644 src/rules/sequencingtominimizemaximumcumulativecost_ilp.rs create mode 100644 src/rules/sequencingtominimizeweightedtardiness_ilp.rs create mode 100644 src/rules/sequencingwithreleasetimesanddeadlines_ilp.rs create mode 100644 src/rules/shortestcommonsupersequence_ilp.rs create mode 100644 src/rules/sparsematrixcompression_ilp.rs create mode 100644 src/rules/stackercrane_ilp.rs create mode 100644 src/rules/steinertreeingraphs_ilp.rs create mode 100644 src/rules/stringtostringcorrection_ilp.rs create mode 100644 src/rules/strongconnectivityaugmentation_ilp.rs create mode 100644 src/rules/subgraphisomorphism_ilp.rs create mode 100644 src/rules/timetabledesign_ilp.rs create mode 100644 src/unit_tests/rules/acyclicpartition_ilp.rs create mode 100644 src/unit_tests/rules/balancedcompletebipartitesubgraph_ilp.rs create mode 100644 src/unit_tests/rules/bicliquecover_ilp.rs create mode 100644 src/unit_tests/rules/biconnectivityaugmentation_ilp.rs create mode 100644 src/unit_tests/rules/bmf_ilp.rs create mode 100644 src/unit_tests/rules/bottlenecktravelingsalesman_ilp.rs create mode 100644 src/unit_tests/rules/boundedcomponentspanningforest_ilp.rs create mode 100644 src/unit_tests/rules/consecutiveblockminimization_ilp.rs create mode 100644 src/unit_tests/rules/consecutiveonesmatrixaugmentation_ilp.rs create mode 100644 src/unit_tests/rules/consecutiveonessubmatrix_ilp.rs create mode 100644 src/unit_tests/rules/disjointconnectingpaths_ilp.rs create mode 100644 src/unit_tests/rules/flowshopscheduling_ilp.rs create mode 100644 src/unit_tests/rules/hamiltonianpath_ilp.rs create mode 100644 src/unit_tests/rules/integralflowhomologousarcs_ilp.rs create mode 100644 src/unit_tests/rules/integralflowwithmultipliers_ilp.rs create mode 100644 src/unit_tests/rules/isomorphicspanningtree_ilp.rs create mode 100644 src/unit_tests/rules/lengthboundeddisjointpaths_ilp.rs create mode 100644 src/unit_tests/rules/longestcircuit_ilp.rs create 
mode 100644 src/unit_tests/rules/minimumcutintoboundedsets_ilp.rs create mode 100644 src/unit_tests/rules/minimumtardinesssequencing_ilp.rs create mode 100644 src/unit_tests/rules/mixedchinesepostman_ilp.rs create mode 100644 src/unit_tests/rules/optimallineararrangement_ilp.rs create mode 100644 src/unit_tests/rules/paintshop_ilp.rs create mode 100644 src/unit_tests/rules/pathconstrainednetworkflow_ilp.rs create mode 100644 src/unit_tests/rules/quadraticassignment_ilp.rs create mode 100644 src/unit_tests/rules/resourceconstrainedscheduling_ilp.rs create mode 100644 src/unit_tests/rules/rootedtreestorageassignment_ilp.rs create mode 100644 src/unit_tests/rules/ruralpostman_ilp.rs create mode 100644 src/unit_tests/rules/sequencingtominimizemaximumcumulativecost_ilp.rs create mode 100644 src/unit_tests/rules/sequencingtominimizeweightedtardiness_ilp.rs create mode 100644 src/unit_tests/rules/sequencingwithreleasetimesanddeadlines_ilp.rs create mode 100644 src/unit_tests/rules/shortestcommonsupersequence_ilp.rs create mode 100644 src/unit_tests/rules/sparsematrixcompression_ilp.rs create mode 100644 src/unit_tests/rules/stackercrane_ilp.rs create mode 100644 src/unit_tests/rules/steinertreeingraphs_ilp.rs create mode 100644 src/unit_tests/rules/stringtostringcorrection_ilp.rs create mode 100644 src/unit_tests/rules/strongconnectivityaugmentation_ilp.rs create mode 100644 src/unit_tests/rules/subgraphisomorphism_ilp.rs create mode 100644 src/unit_tests/rules/timetabledesign_ilp.rs diff --git a/src/rules/acyclicpartition_ilp.rs b/src/rules/acyclicpartition_ilp.rs new file mode 100644 index 00000000..18a58090 --- /dev/null +++ b/src/rules/acyclicpartition_ilp.rs @@ -0,0 +1,195 @@ +//! Reduction from AcyclicPartition to ILP. +//! +//! One-hot assignment x_{v,c}, McCormick same-class indicators s_{t,c}, +//! crossing flags y_t, class ordering o_c, vertex-order copies p_v. +//! See the paper entry for the full formulation. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::AcyclicPartition; +use crate::reduction; +use crate::rules::ilp_helpers::mccormick_product; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionAcyclicPartitionToILP { + target: ILP, + n: usize, +} + +impl ReductionResult for ReductionAcyclicPartitionToILP { + type Source = AcyclicPartition; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// One-hot decode: for each vertex v, output the unique c with x_{v,c} = 1. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.n; + (0..n) + .map(|v| { + (0..n) + .find(|&c| target_solution[v * n + c] == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices * num_vertices + num_arcs * num_vertices + num_arcs + 2 * num_vertices", + num_constraints = "num_vertices + num_vertices + num_arcs * num_vertices + num_arcs + 1 + 2 * num_vertices + 2 * num_vertices * num_vertices + num_arcs", + } +)] +impl ReduceTo> for AcyclicPartition { + type Result = ReductionAcyclicPartitionToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let arcs = self.graph().arcs(); + let m = arcs.len(); + + // Variable indices: + // x_{v,c} : v*n + c [0, n^2) + // s_{t,c} : n^2 + t*n + c [n^2, n^2 + m*n) + // y_t : n^2 + m*n + t [n^2 + m*n, n^2 + m*n + m) + // o_c : n^2 + m*n + m + c [n^2 + m*n + m, n^2 + m*n + m + n) + // p_v : n^2 + m*n + m + n + v [n^2 + m*n + m + n, n^2 + m*n + m + 2n) + let x_idx = |v: usize, c: usize| -> usize { v * n + c }; + let s_idx = |t: usize, c: usize| -> usize { n * n + t * n + c }; + let y_idx = |t: usize| -> usize { n * n + m * n + t }; + let o_idx = |c: usize| -> usize { n * n + m * n + m + c }; + let p_idx = |v: usize| -> usize { n * n + m * n + m + n + v }; + + let num_vars = n * n + m * n + m + 2 * n; + let mut constraints = 
Vec::new(); + let big_m = n as f64; + + // 1) Assignment: Σ_c x_{v,c} = 1 for each vertex v + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|c| (x_idx(v, c), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2) Weight bound: Σ_v w_v * x_{v,c} ≤ B for each class c + for c in 0..n { + let terms: Vec<(usize, f64)> = self + .vertex_weights() + .iter() + .enumerate() + .map(|(v, &w)| (x_idx(v, c), w as f64)) + .collect(); + constraints.push(LinearConstraint::le(terms, *self.weight_bound() as f64)); + } + + // 3) McCormick: s_{t,c} = x_{u_t,c} * x_{v_t,c} + for (t, &(u, v)) in arcs.iter().enumerate() { + for c in 0..n { + constraints.extend(mccormick_product(s_idx(t, c), x_idx(u, c), x_idx(v, c))); + } + } + + // 4) Crossing: y_t + Σ_c s_{t,c} = 1 + for t in 0..m { + let mut terms: Vec<(usize, f64)> = vec![(y_idx(t), 1.0)]; + for c in 0..n { + terms.push((s_idx(t, c), 1.0)); + } + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 5) Cost bound: Σ_t cost(a_t) * y_t ≤ K + let cost_terms: Vec<(usize, f64)> = self + .arc_costs() + .iter() + .enumerate() + .map(|(t, &c)| (y_idx(t), c as f64)) + .collect(); + constraints.push(LinearConstraint::le(cost_terms, *self.cost_bound() as f64)); + + // 6) Order bounds: 0 ≤ o_c ≤ n-1, 0 ≤ p_v ≤ n-1 + for c in 0..n { + constraints.push(LinearConstraint::ge(vec![(o_idx(c), 1.0)], 0.0)); + constraints.push(LinearConstraint::le(vec![(o_idx(c), 1.0)], (n - 1) as f64)); + } + for v in 0..n { + constraints.push(LinearConstraint::ge(vec![(p_idx(v), 1.0)], 0.0)); + constraints.push(LinearConstraint::le(vec![(p_idx(v), 1.0)], (n - 1) as f64)); + } + + // 7) Link p_v to o_c: p_v - o_c ≤ (n-1)(1 - x_{v,c}) and o_c - p_v ≤ (n-1)(1 - x_{v,c}) + for v in 0..n { + for c in 0..n { + // p_v - o_c + (n-1)*x_{v,c} ≤ n-1 + constraints.push(LinearConstraint::le( + vec![ + (p_idx(v), 1.0), + (o_idx(c), -1.0), + (x_idx(v, c), (n - 1) as f64), + ], + (n - 1) as f64, + )); + // o_c - p_v + (n-1)*x_{v,c} 
≤ n-1 + constraints.push(LinearConstraint::le( + vec![ + (o_idx(c), 1.0), + (p_idx(v), -1.0), + (x_idx(v, c), (n - 1) as f64), + ], + (n - 1) as f64, + )); + } + } + + // 8) DAG ordering: p_{v_t} - p_{u_t} ≥ 1 - n * Σ_c s_{t,c} + // i.e., p_{v_t} - p_{u_t} + n * Σ_c s_{t,c} ≥ 1 + for (t, &(u, v)) in arcs.iter().enumerate() { + let mut terms = vec![(p_idx(v), 1.0), (p_idx(u), -1.0)]; + for c in 0..n { + terms.push((s_idx(t, c), big_m)); + } + constraints.push(LinearConstraint::ge(terms, 1.0)); + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionAcyclicPartitionToILP { target, n } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::topology::DirectedGraph; + vec![crate::example_db::specs::RuleExampleSpec { + id: "acyclicpartition_to_ilp", + build: || { + let source = AcyclicPartition::new( + DirectedGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1, 1, 1, 1], + vec![1, 1, 1], + 3, + 2, + ); + let reduction: ReductionAcyclicPartitionToILP = + crate::rules::ReduceTo::>::reduce_to(&source); + let ilp_sol = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: extracted, + target_config: ilp_sol, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/acyclicpartition_ilp.rs"] +mod tests; diff --git a/src/rules/balancedcompletebipartitesubgraph_ilp.rs b/src/rules/balancedcompletebipartitesubgraph_ilp.rs new file mode 100644 index 00000000..955fd772 --- /dev/null +++ b/src/rules/balancedcompletebipartitesubgraph_ilp.rs @@ -0,0 +1,100 @@ +//! Reduction from BalancedCompleteBipartiteSubgraph to ILP. +//! +//! Binary variables x_l for left vertices, y_r for right vertices. +//! 
Cardinality: Σ x_l = k, Σ y_r = k. +//! Non-edge forbidding: x_l + y_r ≤ 1 for every non-edge (l, r). + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::BalancedCompleteBipartiteSubgraph; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use std::collections::HashSet; + +#[derive(Debug, Clone)] +pub struct ReductionBCBSToILP { + target: ILP, + num_vertices: usize, +} + +impl ReductionResult for ReductionBCBSToILP { + type Source = BalancedCompleteBipartiteSubgraph; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_vertices].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices", + num_constraints = "num_vertices * num_vertices", + } +)] +impl ReduceTo> for BalancedCompleteBipartiteSubgraph { + type Result = ReductionBCBSToILP; + + fn reduce_to(&self) -> Self::Result { + let left = self.left_size(); + let right = self.right_size(); + let n = left + right; + let k = self.k(); + let mut constraints = Vec::new(); + + // Build edge lookup (bipartite-local coords) + let edge_set: HashSet<(usize, usize)> = self.graph().left_edges().iter().copied().collect(); + + // Σ x_l = k (for l in 0..left) + let left_terms: Vec<(usize, f64)> = (0..left).map(|l| (l, 1.0)).collect(); + constraints.push(LinearConstraint::eq(left_terms, k as f64)); + + // Σ y_r = k (for r in 0..right, variable index = left + r) + let right_terms: Vec<(usize, f64)> = (0..right).map(|r| (left + r, 1.0)).collect(); + constraints.push(LinearConstraint::eq(right_terms, k as f64)); + + // Non-edge constraints: x_l + y_r ≤ 1 for (l, r) not in E + for l in 0..left { + for r in 0..right { + if !edge_set.contains(&(l, r)) { + constraints.push(LinearConstraint::le(vec![(l, 1.0), (left + r, 1.0)], 1.0)); + } + } + } + + let target = ILP::new(n, constraints, vec![], ObjectiveSense::Minimize); + 
ReductionBCBSToILP { + target, + num_vertices: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::topology::BipartiteGraph; + vec![crate::example_db::specs::RuleExampleSpec { + id: "balancedcompletebipartitesubgraph_to_ilp", + build: || { + let source = BalancedCompleteBipartiteSubgraph::new( + BipartiteGraph::new(3, 3, vec![(0, 0), (0, 1), (1, 0), (1, 1), (2, 1), (2, 2)]), + 2, + ); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![1, 1, 0, 1, 1, 0], + target_config: vec![1, 1, 0, 1, 1, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/balancedcompletebipartitesubgraph_ilp.rs"] +mod tests; diff --git a/src/rules/bicliquecover_ilp.rs b/src/rules/bicliquecover_ilp.rs new file mode 100644 index 00000000..e0a1a2c9 --- /dev/null +++ b/src/rules/bicliquecover_ilp.rs @@ -0,0 +1,125 @@ +//! Reduction from BicliqueCover to ILP. +//! +//! Variables: binary x_{l,b} for left-vertex/biclique membership, +//! binary y_{r,b} for right-vertex/biclique membership, +//! binary z_{(l,r),b} = x_{l,b} * y_{r,b} (McCormick product). +//! Coverage: Σ_b z_{(l,r),b} ≥ 1 for every edge (l,r). +//! Objective: minimize Σ x_{l,b} + Σ y_{r,b}. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::BicliqueCover; +use crate::reduction; +use crate::rules::ilp_helpers::mccormick_product; +use crate::rules::traits::{ReduceTo, ReductionResult}; +#[derive(Debug, Clone)] +pub struct ReductionBicliqueCoverToILP { + target: ILP, + /// Number of source-problem variables (num_vertices * k). + source_vars: usize, +} + +impl ReductionResult for ReductionBicliqueCoverToILP { + type Source = BicliqueCover; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract the vertex-by-biclique membership bits, discarding z auxiliaries. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.source_vars].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices * rank + num_vertices * num_vertices * rank", + num_constraints = "num_vertices * num_vertices * rank + num_edges", + } +)] +impl ReduceTo> for BicliqueCover { + type Result = ReductionBicliqueCoverToILP; + + fn reduce_to(&self) -> Self::Result { + let left = self.left_size(); + let right = self.right_size(); + let n = left + right; + let k = self.k(); + let mut constraints = Vec::new(); + + // Variable layout: + // x_{l,b}: index l*k + b (left membership) [0, left*k) + // y_{r,b}: index left*k + r*k + b (right membership) [left*k, n*k) + // z_{(l,r),b}: index n*k + (l*right + r)*k + b (products) [n*k, n*k + left*right*k) + let x_idx = |l: usize, b: usize| -> usize { l * k + b }; + let y_idx = |r: usize, b: usize| -> usize { left * k + r * k + b }; + let z_idx = |l: usize, r: usize, b: usize| -> usize { n * k + (l * right + r) * k + b }; + + let num_vars = n * k + left * right * k; + let source_vars = n * k; + + // McCormick for z_{(l,r),b} = x_{l,b} * y_{r,b} + for l in 0..left { + for r in 0..right { + for b in 0..k { + constraints.extend(mccormick_product(z_idx(l, r, b), x_idx(l, b), y_idx(r, b))); + } + } + } + + // Coverage: Σ_b z_{(l,r),b} ≥ 1 for every edge + for &(l, r) in self.graph().left_edges() { + let terms: Vec<(usize, f64)> = (0..k).map(|b| (z_idx(l, r, b), 1.0)).collect(); + constraints.push(LinearConstraint::ge(terms, 1.0)); + } + + // Objective: minimize Σ x_{l,b} + Σ y_{r,b} + let mut objective: Vec<(usize, f64)> = Vec::with_capacity(n * k); + for v in 0..n { + for b in 0..k { + objective.push((v * k + b, 1.0)); + } + } + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionBicliqueCoverToILP { + target, + source_vars, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use 
crate::export::SolutionPair; + use crate::topology::BipartiteGraph; + vec![crate::example_db::specs::RuleExampleSpec { + id: "bicliquecover_to_ilp", + build: || { + // L={0,1}, R={0,1,2}, edges: (0,0),(0,1),(1,1),(1,2), k=2 + let source = BicliqueCover::new( + BipartiteGraph::new(2, 3, vec![(0, 0), (0, 1), (1, 1), (1, 2)]), + 2, + ); + let reduction: ReductionBicliqueCoverToILP = + crate::rules::ReduceTo::>::reduce_to(&source); + let ilp_sol = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: extracted, + target_config: ilp_sol, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/bicliquecover_ilp.rs"] +mod tests; diff --git a/src/rules/biconnectivityaugmentation_ilp.rs b/src/rules/biconnectivityaugmentation_ilp.rs new file mode 100644 index 00000000..4be442b9 --- /dev/null +++ b/src/rules/biconnectivityaugmentation_ilp.rs @@ -0,0 +1,229 @@ +//! Reduction from BiconnectivityAugmentation to ILP. +//! +//! Select candidate edges under budget and, for every deleted vertex q, +//! certify that the remaining augmented graph stays connected via unit-flow +//! commodities from a surviving root to every other surviving vertex. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::BiconnectivityAugmentation; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +#[derive(Debug, Clone)] +pub struct ReductionBiconnAugToILP { + target: ILP, + num_candidates: usize, +} + +impl ReductionResult for ReductionBiconnAugToILP { + type Source = BiconnectivityAugmentation; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_candidates].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_potential_edges + 2 * num_vertices * num_vertices * (num_edges + num_potential_edges)", + num_constraints = "1 + 2 * num_vertices * num_vertices * num_potential_edges + num_vertices * num_vertices * num_vertices", + } +)] +impl ReduceTo> for BiconnectivityAugmentation { + type Result = ReductionBiconnAugToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let p = self.num_potential_edges(); + + // Trivial case: n ≤ 1 already biconnected + if n <= 1 { + let target = ILP::new(p, vec![], vec![], ObjectiveSense::Minimize); + return ReductionBiconnAugToILP { + target, + num_candidates: p, + }; + } + + let base_edges = self.graph().edges(); + let m = base_edges.len(); + + // Variable layout: + // y_j: j [0, p) + // f^{q,t}_{i,eta}: p + ((q*n + t)*m + i)*2 + eta [p, p + 2*m*n^2) + // g^{q,t}_{j,eta}: p + 2*m*n^2 + ((q*n + t)*p + j)*2 + eta [p + 2*m*n^2, p + 2*n^2*(m+p)) + let num_vars = p + 2 * n * n * (m + p); + let f_idx = |q: usize, t: usize, i: usize, eta: usize| -> usize { + p + ((q * n + t) * m + i) * 2 + eta + }; + let g_idx = |q: usize, t: usize, j: usize, eta: usize| -> usize { + p + 2 * m * n * n + ((q * n + t) * p + j) * 2 + eta + }; + + let mut constraints = Vec::new(); + + // Binary bounds: y_j ≤ 1 + for j in 0..p { + 
constraints.push(LinearConstraint::le(vec![(j, 1.0)], 1.0)); + } + + // Budget constraint: Σ w_j y_j ≤ B + let budget_terms: Vec<(usize, f64)> = self + .potential_weights() + .iter() + .enumerate() + .map(|(j, &(_, _, w))| (j, w as f64)) + .collect(); + constraints.push(LinearConstraint::le(budget_terms, *self.budget() as f64)); + + // For each deleted vertex q + for q in 0..n { + let root = if q != 0 { 0 } else { 1 }; + + for t in 0..n { + // Pin trivial commodities to zero + if t == q || t == root { + for i in 0..m { + for eta in 0..2 { + constraints + .push(LinearConstraint::eq(vec![(f_idx(q, t, i, eta), 1.0)], 0.0)); + } + } + for j in 0..p { + for eta in 0..2 { + constraints + .push(LinearConstraint::eq(vec![(g_idx(q, t, j, eta), 1.0)], 0.0)); + } + } + continue; + } + + // Pin flows on edges incident to deleted vertex q + for (i, &(u, v)) in base_edges.iter().enumerate() { + if u == q || v == q { + for eta in 0..2 { + constraints + .push(LinearConstraint::eq(vec![(f_idx(q, t, i, eta), 1.0)], 0.0)); + } + } + } + for (j, &(sj, tj, _)) in self.potential_weights().iter().enumerate() { + if sj == q || tj == q { + for eta in 0..2 { + constraints + .push(LinearConstraint::eq(vec![(g_idx(q, t, j, eta), 1.0)], 0.0)); + } + } + } + + // Activation: g^{q,t}_{j,eta} ≤ y_j + for j in 0..p { + let &(sj, tj, _) = &self.potential_weights()[j]; + if sj == q || tj == q { + continue; // already pinned to 0 + } + for eta in 0..2 { + constraints.push(LinearConstraint::le( + vec![(g_idx(q, t, j, eta), 1.0), (j, -1.0)], + 0.0, + )); + } + } + + // Flow conservation for each surviving vertex v ≠ q + for v in 0..n { + if v == q { + continue; + } + let mut terms: Vec<(usize, f64)> = Vec::new(); + + // Base edges + for (i, &(u_e, v_e)) in base_edges.iter().enumerate() { + if u_e == q || v_e == q { + continue; + } + // eta=0 means u->v direction + if u_e == v { + terms.push((f_idx(q, t, i, 0), 1.0)); // outgoing + terms.push((f_idx(q, t, i, 1), -1.0)); // incoming + } + if v_e == v { + 
terms.push((f_idx(q, t, i, 0), -1.0)); // incoming + terms.push((f_idx(q, t, i, 1), 1.0)); // outgoing + } + } + + // Candidate edges + for (j, &(sj, tj, _)) in self.potential_weights().iter().enumerate() { + if sj == q || tj == q { + continue; + } + // eta=0 means s->t direction + if sj == v { + terms.push((g_idx(q, t, j, 0), 1.0)); + terms.push((g_idx(q, t, j, 1), -1.0)); + } + if tj == v { + terms.push((g_idx(q, t, j, 0), -1.0)); + terms.push((g_idx(q, t, j, 1), 1.0)); + } + } + + let rhs = if v == root { + 1.0 + } else if v == t { + -1.0 + } else { + 0.0 + }; + constraints.push(LinearConstraint::eq(terms, rhs)); + } + } + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionBiconnAugToILP { + target, + num_candidates: p, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "biconnectivityaugmentation_to_ilp", + build: || { + // Path 0-1-2-3, candidates: (0,2,1),(0,3,2),(1,3,1), budget=3 + let source = BiconnectivityAugmentation::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![(0, 2, 1), (0, 3, 2), (1, 3, 1)], + 3, + ); + let reduction: ReductionBiconnAugToILP = + crate::rules::ReduceTo::>::reduce_to(&source); + let ilp_sol = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: extracted, + target_config: ilp_sol, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/biconnectivityaugmentation_ilp.rs"] +mod tests; diff --git a/src/rules/bmf_ilp.rs b/src/rules/bmf_ilp.rs new file mode 100644 index 00000000..3f36616d --- /dev/null +++ b/src/rules/bmf_ilp.rs @@ -0,0 +1,151 @@ +//! Reduction from BMF (Boolean Matrix Factorization) to ILP. 
+//! +//! Variables: binary b_{i,r}, c_{r,j}, McCormick product p_{i,r,j} = b_{i,r} * c_{r,j}, +//! reconstructed entry w_{i,j} = OR_r p_{i,r,j}, error e_{i,j} = |A_{i,j} - w_{i,j}|. +//! Minimize sum of errors. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, BMF, ILP}; +use crate::reduction; +use crate::rules::ilp_helpers::mccormick_product; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionBMFToILP { + target: ILP, + m: usize, + n: usize, + k: usize, +} + +impl ReductionResult for ReductionBMFToILP { + type Source = BMF; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // Extract B (m x k) then C (k x n) — first m*k + k*n variables + let total = self.m * self.k + self.k * self.n; + target_solution[..total].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "rows * rank + rank * cols + rows * rank * cols + rows * cols + rows * cols", + num_constraints = "3 * rows * rank * cols + rank * rows * cols + rows * cols + 2 * rows * cols", + } +)] +impl ReduceTo> for BMF { + type Result = ReductionBMFToILP; + + fn reduce_to(&self) -> Self::Result { + let m = self.rows(); + let n = self.cols(); + let k = self.rank(); + + // Variable layout: + // b_{i,r}: m*k variables at indices [0, m*k) + // c_{r,j}: k*n variables at indices [m*k, m*k + k*n) + // p_{i,r,j}: m*k*n variables at indices [m*k + k*n, m*k + k*n + m*k*n) + // w_{i,j}: m*n variables at indices [m*k + k*n + m*k*n, m*k + k*n + m*k*n + m*n) + // e_{i,j}: m*n variables at indices [m*k + k*n + m*k*n + m*n, ...) 
+ let b_offset = 0; + let c_offset = m * k; + let p_offset = m * k + k * n; + let w_offset = p_offset + m * k * n; + let e_offset = w_offset + m * n; + let num_vars = e_offset + m * n; + + let mut constraints = Vec::new(); + + for i in 0..m { + for j in 0..n { + for r in 0..k { + let p_idx = p_offset + i * k * n + r * n + j; + let b_idx = b_offset + i * k + r; + let c_idx = c_offset + r * n + j; + + // McCormick: p_{i,r,j} = b_{i,r} * c_{r,j} + constraints.extend(mccormick_product(p_idx, b_idx, c_idx)); + } + + let w_idx = w_offset + i * n + j; + let e_idx = e_offset + i * n + j; + + // w_{i,j} >= p_{i,r,j} for all r + for r in 0..k { + let p_idx = p_offset + i * k * n + r * n + j; + constraints.push(LinearConstraint::ge(vec![(w_idx, 1.0), (p_idx, -1.0)], 0.0)); + } + + // w_{i,j} <= sum_r p_{i,r,j} + let mut w_upper_terms = vec![(w_idx, 1.0)]; + for r in 0..k { + let p_idx = p_offset + i * k * n + r * n + j; + w_upper_terms.push((p_idx, -1.0)); + } + constraints.push(LinearConstraint::le(w_upper_terms, 0.0)); + + // e_{i,j} >= A_{i,j} - w_{i,j} + let a_val = if self.matrix()[i][j] { 1.0 } else { 0.0 }; + constraints.push(LinearConstraint::ge( + vec![(e_idx, 1.0), (w_idx, 1.0)], + a_val, + )); + + // e_{i,j} >= w_{i,j} - A_{i,j} + constraints.push(LinearConstraint::ge( + vec![(e_idx, 1.0), (w_idx, -1.0)], + -a_val, + )); + } + } + + // Objective: minimize sum e_{i,j} + let objective: Vec<(usize, f64)> = (0..m * n).map(|idx| (e_offset + idx, 1.0)).collect(); + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + ReductionBMFToILP { target, m, n, k } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "bmf_to_ilp", + build: || { + // 2x2 identity matrix, rank 2 + let source = BMF::new(vec![vec![true, false], vec![false, true]], 2); + // B = [[1,0],[0,1]], C = [[1,0],[0,1]] + // b: [1,0,0,1], c: 
[1,0,0,1] + let source_config = vec![1, 0, 0, 1, 1, 0, 0, 1]; + let reduction: ReductionBMFToILP = ReduceTo::>::reduce_to(&source); + // Build target config by encoding: + // p_{0,0,0}=1, p_{0,0,1}=0, p_{0,1,0}=0, p_{0,1,1}=0 + // p_{1,0,0}=0, p_{1,0,1}=0, p_{1,1,0}=0, p_{1,1,1}=1 + // w: [1,0,0,1], e: [0,0,0,0] + let target_config = vec![ + 1, 0, 0, 1, // B + 1, 0, 0, 1, // C + 1, 0, 0, 0, 0, 0, 0, 1, // P + 1, 0, 0, 1, // W + 0, 0, 0, 0, // E + ]; + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/bmf_ilp.rs"] +mod tests; diff --git a/src/rules/bottlenecktravelingsalesman_ilp.rs b/src/rules/bottlenecktravelingsalesman_ilp.rs new file mode 100644 index 00000000..c86bf15d --- /dev/null +++ b/src/rules/bottlenecktravelingsalesman_ilp.rs @@ -0,0 +1,204 @@ +//! Reduction from BottleneckTravelingSalesman to ILP (Integer Linear Programming). +//! +//! Cyclic position-assignment formulation with bottleneck variable: +//! - Binary x_{v,p}: vertex v at position p (cyclic tour) +//! - Binary z_{e,p,dir}: linearized consecutive-pair products +//! - Integer bottleneck variable b >= w_e * z_{e,p,dir} +//! - Objective: minimize b + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::BottleneckTravelingSalesman; +use crate::reduction; +use crate::rules::ilp_helpers::mccormick_product; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::Graph; + +/// Result of reducing BottleneckTravelingSalesman to ILP. 
+/// +/// Variable layout (ILP, all non-negative): +/// - `x_{v,p}` at index `v * n + p`, bounded to {0,1} +/// - `z_{e,p,dir}` at index `n^2 + 2*(e*n + p) + dir`, bounded to {0,1} +/// - `b` (bottleneck) at index `n^2 + 2*m*n` +#[derive(Debug, Clone)] +pub struct ReductionBTSPToILP { + target: ILP, + num_vertices: usize, + source_edges: Vec<(usize, usize)>, +} + +impl ReductionResult for ReductionBTSPToILP { + type Source = BottleneckTravelingSalesman; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: decode tour from x variables, then mark selected edges. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_vertices; + + // Decode tour: for each position p, find vertex v with x_{v,p} = 1 + let mut tour = vec![0usize; n]; + for p in 0..n { + for v in 0..n { + if target_solution[v * n + p] == 1 { + tour[p] = v; + break; + } + } + } + + // Map tour to edge selection + let mut edge_selection = vec![0usize; self.source_edges.len()]; + for p in 0..n { + let u = tour[p]; + let v = tour[(p + 1) % n]; + for (idx, &(a, b)) in self.source_edges.iter().enumerate() { + if (a == u && b == v) || (a == v && b == u) { + edge_selection[idx] = 1; + break; + } + } + } + + edge_selection + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices^2 + 2 * num_edges * num_vertices + 1", + num_constraints = "2 * num_vertices + num_vertices^2 + 2 * num_edges * num_vertices + 6 * num_edges * num_vertices + num_vertices + 2 * num_edges * num_vertices", + } +)] +impl ReduceTo> for BottleneckTravelingSalesman { + type Result = ReductionBTSPToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let graph = self.graph(); + let edges = graph.edges(); + let m = edges.len(); + let weights = self.weights(); + + let num_x = n * n; + let num_z = 2 * m * n; + let b_idx = num_x + num_z; + let num_vars = num_x + num_z + 1; + + let x_idx = |v: usize, p: usize| -> usize { v * n + p }; + let 
z_fwd_idx = |e: usize, p: usize| -> usize { num_x + 2 * (e * n + p) }; + let z_rev_idx = |e: usize, p: usize| -> usize { num_x + 2 * (e * n + p) + 1 }; + + let mut constraints = Vec::new(); + + // Assignment: each vertex in exactly one position + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|p| (x_idx(v, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Assignment: each position has exactly one vertex + for p in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|v| (x_idx(v, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Binary bounds for x variables (ILP is non-negative integer) + for idx in 0..num_x { + constraints.push(LinearConstraint::le(vec![(idx, 1.0)], 1.0)); + } + + // Binary bounds for z variables + for idx in 0..num_z { + constraints.push(LinearConstraint::le(vec![(num_x + idx, 1.0)], 1.0)); + } + + // McCormick linearization for z variables (cyclic: position (p+1) mod n) + for (e, &(u, v)) in edges.iter().enumerate() { + for p in 0..n { + let p_next = (p + 1) % n; + // Forward: z_fwd = x_{u,p} * x_{v,p_next} + constraints.extend(mccormick_product( + z_fwd_idx(e, p), + x_idx(u, p), + x_idx(v, p_next), + )); + // Reverse: z_rev = x_{v,p} * x_{u,p_next} + constraints.extend(mccormick_product( + z_rev_idx(e, p), + x_idx(v, p), + x_idx(u, p_next), + )); + } + } + + // Adjacency: for each position p, exactly one edge in either direction + for p in 0..n { + let mut terms = Vec::new(); + for e in 0..m { + terms.push((z_fwd_idx(e, p), 1.0)); + terms.push((z_rev_idx(e, p), 1.0)); + } + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Bottleneck: b >= w_e * z_{e,p,dir} for all e, p, dir + for (e, &w) in weights.iter().enumerate() { + let w_f64 = w as f64; + for p in 0..n { + constraints.push(LinearConstraint::ge( + vec![(b_idx, 1.0), (z_fwd_idx(e, p), -w_f64)], + 0.0, + )); + constraints.push(LinearConstraint::ge( + vec![(b_idx, 1.0), (z_rev_idx(e, p), 
-w_f64)], + 0.0, + )); + } + } + + // Objective: minimize b + let objective = vec![(b_idx, 1.0)]; + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionBTSPToILP { + target, + num_vertices: n, + source_edges: edges, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "bottlenecktravelingsalesman_to_ilp", + build: || { + // C4 with varying weights + let source = BottleneckTravelingSalesman::new( + crate::topology::SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0)]), + vec![1, 2, 3, 4], + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/bottlenecktravelingsalesman_ilp.rs"] +mod tests; diff --git a/src/rules/boundedcomponentspanningforest_ilp.rs b/src/rules/boundedcomponentspanningforest_ilp.rs new file mode 100644 index 00000000..7688ddff --- /dev/null +++ b/src/rules/boundedcomponentspanningforest_ilp.rs @@ -0,0 +1,220 @@ +//! Reduction from BoundedComponentSpanningForest to ILP. +//! +//! Assign every vertex to one of K components, bound weight, certify +//! connectivity inside each used component via flow. +//! See the paper entry for the full formulation. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::BoundedComponentSpanningForest; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +#[derive(Debug, Clone)] +pub struct ReductionBCSFToILP { + target: ILP, + n: usize, + k: usize, +} + +impl ReductionResult for ReductionBCSFToILP { + type Source = BoundedComponentSpanningForest; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// One-hot decode: for each vertex v, output the unique component c with x_{v,c} = 1. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.n; + let k = self.k; + (0..n) + .map(|v| { + (0..k) + .find(|&c| target_solution[v * k + c] == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "3 * num_vertices * max_components + 2 * max_components + 2 * num_edges * max_components", + num_constraints = "num_vertices + max_components + max_components + 2 * max_components + num_vertices * max_components + 4 * num_vertices * max_components + 4 * num_edges * max_components + num_vertices * max_components", + } +)] +impl ReduceTo> for BoundedComponentSpanningForest { + type Result = ReductionBCSFToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let edges = self.graph().edges(); + let m = edges.len(); + let k = self.max_components(); + + let x_idx = |v: usize, c: usize| -> usize { v * k + c }; + let u_idx = |c: usize| -> usize { n * k + c }; + let r_idx = |v: usize, c: usize| -> usize { n * k + k + v * k + c }; + let s_idx = |c: usize| -> usize { 2 * n * k + k + c }; + let b_idx = |v: usize, c: usize| -> usize { 2 * n * k + 2 * k + v * k + c }; + let f_idx = + |i: usize, eta: usize, c: usize| -> usize { 3 * n * k + 2 * k + (i * 2 + eta) * k + c }; + + let num_vars = 3 * n * k + 2 * k + 2 * m * k; + let n_f64 = n as f64; + let mut constraints = Vec::new(); + 
+ // 1) Assignment: sum_c x_{v,c} = 1 for each vertex v + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..k).map(|c| (x_idx(v, c), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2) Weight bound: sum_v w_v * x_{v,c} <= B for each component c + for c in 0..k { + let terms: Vec<(usize, f64)> = self + .weights() + .iter() + .enumerate() + .map(|(v, &w)| (x_idx(v, c), w as f64)) + .collect(); + constraints.push(LinearConstraint::le(terms, *self.max_weight() as f64)); + } + + // 3) Size: s_c = sum_v x_{v,c} + for c in 0..k { + let mut terms: Vec<(usize, f64)> = vec![(s_idx(c), -1.0)]; + for v in 0..n { + terms.push((x_idx(v, c), 1.0)); + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // 4) Nonempty indicator: u_c <= s_c and s_c <= n * u_c + for c in 0..k { + constraints.push(LinearConstraint::le( + vec![(u_idx(c), 1.0), (s_idx(c), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(s_idx(c), 1.0), (u_idx(c), -n_f64)], + 0.0, + )); + } + + // 5) Root selection: sum_v r_{v,c} = u_c and r_{v,c} <= x_{v,c} + for c in 0..k { + let mut terms: Vec<(usize, f64)> = (0..n).map(|v| (r_idx(v, c), 1.0)).collect(); + terms.push((u_idx(c), -1.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + + for v in 0..n { + constraints.push(LinearConstraint::le( + vec![(r_idx(v, c), 1.0), (x_idx(v, c), -1.0)], + 0.0, + )); + } + } + + // 6) Product linearization: b_{v,c} = s_c * r_{v,c} + for v in 0..n { + for c in 0..k { + // b <= s_c + constraints.push(LinearConstraint::le( + vec![(b_idx(v, c), 1.0), (s_idx(c), -1.0)], + 0.0, + )); + // b <= n * r + constraints.push(LinearConstraint::le( + vec![(b_idx(v, c), 1.0), (r_idx(v, c), -n_f64)], + 0.0, + )); + // b >= s - n*(1-r) => b - s - n*r >= -n + constraints.push(LinearConstraint::ge( + vec![(b_idx(v, c), 1.0), (s_idx(c), -1.0), (r_idx(v, c), -n_f64)], + -n_f64, + )); + // b >= 0 + constraints.push(LinearConstraint::ge(vec![(b_idx(v, c), 1.0)], 0.0)); + } + } + + 
// 7) Flow capacity: 0 <= f_{i,eta,c} <= (n-1)*x_{u_i,c} and <= (n-1)*x_{v_i,c} + let cap = (n as f64) - 1.0; + for (i, &(u_e, v_e)) in edges.iter().enumerate() { + for eta in 0..2usize { + for c in 0..k { + constraints.push(LinearConstraint::ge(vec![(f_idx(i, eta, c), 1.0)], 0.0)); + constraints.push(LinearConstraint::le( + vec![(f_idx(i, eta, c), 1.0), (x_idx(u_e, c), -cap)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(f_idx(i, eta, c), 1.0), (x_idx(v_e, c), -cap)], + 0.0, + )); + } + } + } + + // 8) Flow conservation: net_flow(v,c) = b_{v,c} - x_{v,c} + for v in 0..n { + for c in 0..k { + let mut terms: Vec<(usize, f64)> = Vec::new(); + + for (i, &(u_e, v_e)) in edges.iter().enumerate() { + if u_e == v { + terms.push((f_idx(i, 0, c), 1.0)); + terms.push((f_idx(i, 1, c), -1.0)); + } + if v_e == v { + terms.push((f_idx(i, 0, c), -1.0)); + terms.push((f_idx(i, 1, c), 1.0)); + } + } + + terms.push((b_idx(v, c), -1.0)); + terms.push((x_idx(v, c), 1.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionBCSFToILP { target, n, k } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "boundedcomponentspanningforest_to_ilp", + build: || { + let source = BoundedComponentSpanningForest::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1, 2, 2, 1], + 2, + 4, + ); + let reduction: ReductionBCSFToILP = + crate::rules::ReduceTo::>::reduce_to(&source); + let ilp_sol = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: extracted, + target_config: ilp_sol, + }, + ) + }, + }] +} + 
+#[cfg(test)] +#[path = "../unit_tests/rules/boundedcomponentspanningforest_ilp.rs"] +mod tests; diff --git a/src/rules/consecutiveblockminimization_ilp.rs b/src/rules/consecutiveblockminimization_ilp.rs new file mode 100644 index 00000000..dc7e3bcb --- /dev/null +++ b/src/rules/consecutiveblockminimization_ilp.rs @@ -0,0 +1,187 @@ +//! Reduction from ConsecutiveBlockMinimization to ILP. +//! +//! Permute columns with a one-hot assignment and count row-wise block starts +//! by detecting each 0-to-1 transition after permutation. + +use crate::models::algebraic::{ + ConsecutiveBlockMinimization, LinearConstraint, ObjectiveSense, ILP, +}; +use crate::reduction; +use crate::rules::ilp_helpers::{one_hot_assignment_constraints, one_hot_decode}; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionCBMToILP { + target: ILP, + num_cols: usize, +} + +impl ReductionResult for ReductionCBMToILP { + type Source = ConsecutiveBlockMinimization; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // Decode the column permutation from x_{c,p} + one_hot_decode(target_solution, self.num_cols, self.num_cols, 0) + } +} + +#[reduction( + overhead = { + num_vars = "num_cols * num_cols + num_rows * num_cols + num_rows * num_cols", + num_constraints = "num_cols + num_cols + num_rows * num_cols + num_rows + num_rows * num_cols + 1", + } +)] +impl ReduceTo> for ConsecutiveBlockMinimization { + type Result = ReductionCBMToILP; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_rows(); + let n = self.num_cols(); + + // Variable layout: + // x_{c,p}: n*n variables at indices [0, n*n) + // x_{c*n + p} = 1 iff column c goes to position p + // a_{r,p}: m*n variables at indices [n*n, n*n + m*n) + // value seen by row r at position p + // b_{r,p}: m*n variables at indices [n*n + m*n, n*n + 2*m*n) + // block-start indicator + let x_offset = 0; + 
let a_offset = n * n; + let b_offset = n * n + m * n; + let num_vars = n * n + 2 * m * n; + + let mut constraints = Vec::new(); + + // One-hot assignment: each column to exactly one position, each position to exactly one column + constraints.extend(one_hot_assignment_constraints(n, n, x_offset)); + + // a_{r,p} = sum_c A_{r,c} * x_{c,p} for all r, p + for r in 0..m { + for p in 0..n { + let a_idx = a_offset + r * n + p; + // a_{r,p} - sum_c A_{r,c} * x_{c,p} = 0 + let mut terms = vec![(a_idx, 1.0)]; + for c in 0..n { + if self.matrix()[r][c] { + terms.push((x_offset + c * n + p, -1.0)); + } + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + } + + // Block-start indicators + for r in 0..m { + // b_{r,0} = a_{r,0} + let b_idx = b_offset + r * n; + let a_idx = a_offset + r * n; + constraints.push(LinearConstraint::eq(vec![(b_idx, 1.0), (a_idx, -1.0)], 0.0)); + + // b_{r,p} >= a_{r,p} - a_{r,p-1} for p > 0 + for p in 1..n { + let b_idx = b_offset + r * n + p; + let a_cur = a_offset + r * n + p; + let a_prev = a_offset + r * n + (p - 1); + constraints.push(LinearConstraint::ge( + vec![(b_idx, 1.0), (a_cur, -1.0), (a_prev, 1.0)], + 0.0, + )); + } + } + + // sum_{r,p} b_{r,p} <= K + let mut bound_terms = Vec::new(); + for r in 0..m { + for p in 0..n { + bound_terms.push((b_offset + r * n + p, 1.0)); + } + } + constraints.push(LinearConstraint::le(bound_terms, self.bound() as f64)); + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionCBMToILP { + target, + num_cols: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "consecutiveblockminimization_to_ilp", + build: || { + // 2x3 matrix, bound=2 + let source = ConsecutiveBlockMinimization::new( + vec![vec![true, false, true], vec![false, true, true]], + 2, + ); + // Permutation [2,0,1] => columns reordered as [2,0,1] + // Row 0: 
A[0,2]=1, A[0,0]=1, A[0,1]=0 => [1,1,0] => 1 block + // Row 1: A[1,2]=1, A[1,0]=0, A[1,1]=1 => [1,0,1] => 2 blocks + // Total = 3 > 2, try [0,2,1]: + // Row 0: A[0,0]=1, A[0,2]=1, A[0,1]=0 => [1,1,0] => 1 block + // Row 1: A[1,0]=0, A[1,2]=1, A[1,1]=1 => [0,1,1] => 1 block + // Total = 2 <= 2. Good. + let source_config = vec![0, 2, 1]; + let reduction: ReductionCBMToILP = ReduceTo::>::reduce_to(&source); + // Encode x_{c,p}: column c at position p + // c=0 at p=0: x_{0*3+0}=1, c=2 at p=1: x_{2*3+1}=1, c=1 at p=2: x_{1*3+2}=1 + let n = 3; + let mut target_config = vec![0; reduction.target.num_vars]; + // x_{0,0} = 1 + target_config[0 * n + 0] = 1; + // x_{2,1} = 1 + target_config[2 * n + 1] = 1; + // x_{1,2} = 1 + target_config[1 * n + 2] = 1; + // a values + let a_offset = n * n; + let m = 2; + let matrix = vec![vec![true, false, true], vec![false, true, true]]; + let perm = [0, 2, 1]; + for r in 0..m { + for p in 0..n { + if matrix[r][perm[p]] { + target_config[a_offset + r * n + p] = 1; + } + } + } + // b values + let b_offset = n * n + m * n; + for r in 0..m { + for p in 0..n { + let a_cur = if matrix[r][perm[p]] { 1 } else { 0 }; + let a_prev = if p > 0 && matrix[r][perm[p - 1]] { + 1 + } else { + 0 + }; + if p == 0 { + target_config[b_offset + r * n + p] = a_cur; + } else if a_cur > a_prev { + target_config[b_offset + r * n + p] = 1; + } + } + } + + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/consecutiveblockminimization_ilp.rs"] +mod tests; diff --git a/src/rules/consecutiveonesmatrixaugmentation_ilp.rs b/src/rules/consecutiveonesmatrixaugmentation_ilp.rs new file mode 100644 index 00000000..e3ca84e2 --- /dev/null +++ b/src/rules/consecutiveonesmatrixaugmentation_ilp.rs @@ -0,0 +1,205 @@ +//! Reduction from ConsecutiveOnesMatrixAugmentation to ILP. +//! +//! 
Choose a column permutation and, for each row, choose an interval that will +//! become its consecutive block of 1s. Flips are needed only for zeros inside +//! that interval. + +use crate::models::algebraic::{ + ConsecutiveOnesMatrixAugmentation, LinearConstraint, ObjectiveSense, ILP, +}; +use crate::reduction; +use crate::rules::ilp_helpers::{one_hot_assignment_constraints, one_hot_decode}; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionCOMAToILP { + target: ILP, + num_cols: usize, +} + +impl ReductionResult for ReductionCOMAToILP { + type Source = ConsecutiveOnesMatrixAugmentation; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + one_hot_decode(target_solution, self.num_cols, self.num_cols, 0) + } +} + +#[reduction( + overhead = { + num_vars = "num_cols * num_cols + 5 * num_rows * num_cols", + num_constraints = "num_cols + num_cols + num_rows * num_cols + 2 * num_rows + num_rows + 3 * num_rows * num_cols + 4 * num_rows * num_cols + 1", + } +)] +impl ReduceTo> for ConsecutiveOnesMatrixAugmentation { + type Result = ReductionCOMAToILP; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_rows(); + let n = self.num_cols(); + + // Variable layout (all binary): + // x_{c,p}: n^2 at [0, n^2) + // a_{r,p}: m*n at [n^2, n^2 + m*n) + // l_{r,p}: m*n at [n^2 + m*n, n^2 + 2*m*n) + // u_{r,p}: m*n at [n^2 + 2*m*n, n^2 + 3*m*n) + // h_{r,p}: m*n at [n^2 + 3*m*n, n^2 + 4*m*n) + // f_{r,p}: m*n at [n^2 + 4*m*n, n^2 + 5*m*n) + let x_off = 0; + let a_off = n * n; + let l_off = n * n + m * n; + let u_off = n * n + 2 * m * n; + let h_off = n * n + 3 * m * n; + let f_off = n * n + 4 * m * n; + let num_vars = n * n + 5 * m * n; + + let mut constraints = Vec::new(); + + // One-hot permutation assignment + constraints.extend(one_hot_assignment_constraints(n, n, x_off)); + + // a_{r,p} = sum_c A_{r,c} * x_{c,p} + for r in 
0..m { + for p in 0..n { + let a_idx = a_off + r * n + p; + let mut terms = vec![(a_idx, 1.0)]; + for c in 0..n { + if self.matrix()[r][c] { + terms.push((x_off + c * n + p, -1.0)); + } + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + } + + // Per-row interval constraints + for r in 0..m { + let beta_r: f64 = if self.matrix()[r].iter().any(|&v| v) { + 1.0 + } else { + 0.0 + }; + + // sum_p l_{r,p} = beta_r + let l_terms: Vec<(usize, f64)> = (0..n).map(|p| (l_off + r * n + p, 1.0)).collect(); + constraints.push(LinearConstraint::eq(l_terms, beta_r)); + + // sum_p u_{r,p} = beta_r + let u_terms: Vec<(usize, f64)> = (0..n).map(|p| (u_off + r * n + p, 1.0)).collect(); + constraints.push(LinearConstraint::eq(u_terms, beta_r)); + + // sum_p p*l_{r,p} <= sum_p p*u_{r,p} + (n-1)*(1 - beta_r) + // => sum_p p*l_{r,p} - sum_p p*u_{r,p} <= (n-1)*(1 - beta_r) + let mut order_terms = Vec::new(); + for p in 0..n { + order_terms.push((l_off + r * n + p, p as f64)); + order_terms.push((u_off + r * n + p, -(p as f64))); + } + constraints.push(LinearConstraint::le( + order_terms, + (n as f64 - 1.0) * (1.0 - beta_r), + )); + + for p in 0..n { + let h_idx = h_off + r * n + p; + let a_idx = a_off + r * n + p; + let f_idx = f_off + r * n + p; + + // h_{r,p} <= sum_{q=0}^{p} l_{r,q} + let l_prefix: Vec<(usize, f64)> = + (0..=p).map(|q| (l_off + r * n + q, -1.0)).collect(); + let mut h_le_l = vec![(h_idx, 1.0)]; + h_le_l.extend(l_prefix); + constraints.push(LinearConstraint::le(h_le_l, 0.0)); + + // h_{r,p} <= sum_{q=p}^{n-1} u_{r,q} + let u_suffix: Vec<(usize, f64)> = + (p..n).map(|q| (u_off + r * n + q, -1.0)).collect(); + let mut h_le_u = vec![(h_idx, 1.0)]; + h_le_u.extend(u_suffix); + constraints.push(LinearConstraint::le(h_le_u, 0.0)); + + // h_{r,p} >= sum_{q=0}^{p} l_{r,q} + sum_{q=p}^{n-1} u_{r,q} - 1 + let mut h_ge_terms = vec![(h_idx, 1.0)]; + for q in 0..=p { + h_ge_terms.push((l_off + r * n + q, -1.0)); + } + for q in p..n { + h_ge_terms.push((u_off + r * n + q, 
-1.0)); + } + constraints.push(LinearConstraint::ge(h_ge_terms, -1.0)); + + // a_{r,p} <= h_{r,p} + constraints.push(LinearConstraint::le(vec![(a_idx, 1.0), (h_idx, -1.0)], 0.0)); + + // h_{r,p} <= a_{r,p} + f_{r,p} + constraints.push(LinearConstraint::le( + vec![(h_idx, 1.0), (a_idx, -1.0), (f_idx, -1.0)], + 0.0, + )); + + // f_{r,p} <= h_{r,p} + constraints.push(LinearConstraint::le(vec![(f_idx, 1.0), (h_idx, -1.0)], 0.0)); + + // f_{r,p} + a_{r,p} <= 1 + constraints.push(LinearConstraint::le(vec![(f_idx, 1.0), (a_idx, 1.0)], 1.0)); + } + } + + // Augmentation budget: sum f_{r,p} <= K + let mut budget_terms = Vec::new(); + for r in 0..m { + for p in 0..n { + budget_terms.push((f_off + r * n + p, 1.0)); + } + } + constraints.push(LinearConstraint::le(budget_terms, self.bound() as f64)); + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionCOMAToILP { + target, + num_cols: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "consecutiveonesmatrixaugmentation_to_ilp", + build: || { + let source = ConsecutiveOnesMatrixAugmentation::new( + vec![vec![true, false, true], vec![false, true, true]], + 1, + ); + // Identity permutation [0,1,2]: + // Row 0: [1,0,1] needs 1 flip (the middle 0), cost=1 + // Row 1: [0,1,1] needs 0 flips, cost=0 + // Total = 1 <= 1 + let source_config = vec![0, 1, 2]; + let reduction: ReductionCOMAToILP = ReduceTo::>::reduce_to(&source); + let ilp_solver = crate::solvers::ILPSolver::new(); + let target_config = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&target_config); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: extracted, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = 
"../unit_tests/rules/consecutiveonesmatrixaugmentation_ilp.rs"] +mod tests; diff --git a/src/rules/consecutiveonessubmatrix_ilp.rs b/src/rules/consecutiveonessubmatrix_ilp.rs new file mode 100644 index 00000000..0703410b --- /dev/null +++ b/src/rules/consecutiveonessubmatrix_ilp.rs @@ -0,0 +1,228 @@ +//! Reduction from ConsecutiveOnesSubmatrix to ILP. +//! +//! Select exactly K columns, permute only those selected columns, and require +//! every row to have a single consecutive block within the chosen submatrix. +//! The output is the column-selection bits s_c. + +use crate::models::algebraic::{ConsecutiveOnesSubmatrix, LinearConstraint, ObjectiveSense, ILP}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionCOSToILP { + target: ILP, + num_cols: usize, +} + +impl ReductionResult for ReductionCOSToILP { + type Source = ConsecutiveOnesSubmatrix; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // Output the selection bits s_c (first num_cols variables) + target_solution[..self.num_cols].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_cols + num_cols * bound + 5 * num_rows * bound", + num_constraints = "1 + num_cols + bound + num_rows * bound + 2 * num_rows + num_rows + 3 * num_rows * bound + 4 * num_rows * bound", + } +)] +impl ReduceTo> for ConsecutiveOnesSubmatrix { + type Result = ReductionCOSToILP; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_rows(); + let n = self.num_cols(); + let k = self.bound() as usize; + + // Variable layout (all binary): + // s_c: n vars at [0, n) — column selection + // x_{c,p}: n*K vars at [n, n + n*K) — column c placed at position p in [0..K) + // a_{r,p}: m*K vars at [n + n*K, n + n*K + m*K) — value at row r, position p + // l_{r,p}: m*K vars — left boundary + // u_{r,p}: m*K vars — right boundary + // h_{r,p}: m*K vars — inside 
interval + // f_{r,p}: m*K vars — flip indicator (not used for budget, but needed for C1P) + let s_off = 0; + let x_off = n; + let a_off = n + n * k; + let l_off = a_off + m * k; + let u_off = l_off + m * k; + let h_off = u_off + m * k; + let f_off = h_off + m * k; + let num_vars = f_off + m * k; + + let mut constraints = Vec::new(); + + // sum_c s_c = K + let s_terms: Vec<(usize, f64)> = (0..n).map(|c| (s_off + c, 1.0)).collect(); + constraints.push(LinearConstraint::eq(s_terms, k as f64)); + + // sum_p x_{c,p} = s_c for all c + for c in 0..n { + let mut terms: Vec<(usize, f64)> = (0..k).map(|p| (x_off + c * k + p, 1.0)).collect(); + terms.push((s_off + c, -1.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // sum_c x_{c,p} = 1 for all p in {0, ..., K-1} + for p in 0..k { + let terms: Vec<(usize, f64)> = (0..n).map(|c| (x_off + c * k + p, 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // a_{r,p} = sum_c A_{r,c} * x_{c,p} + for r in 0..m { + for p in 0..k { + let a_idx = a_off + r * k + p; + let mut terms = vec![(a_idx, 1.0)]; + for c in 0..n { + if self.matrix()[r][c] { + terms.push((x_off + c * k + p, -1.0)); + } + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + } + + // C1P interval constraints on the K-position permuted submatrix + for r in 0..m { + // beta_r = 1 if row r has at least one 1 in the original matrix + // (among any column, not just selected ones — the ILP will determine) + // We use beta_r = 1 for rows that have any 1, to allow intervals + let beta_r: f64 = if self.matrix()[r].iter().any(|&v| v) { + 1.0 + } else { + 0.0 + }; + + // sum_p l_{r,p} = beta_r + let l_terms: Vec<(usize, f64)> = (0..k).map(|p| (l_off + r * k + p, 1.0)).collect(); + constraints.push(LinearConstraint::eq(l_terms, beta_r)); + + // sum_p u_{r,p} = beta_r + let u_terms: Vec<(usize, f64)> = (0..k).map(|p| (u_off + r * k + p, 1.0)).collect(); + constraints.push(LinearConstraint::eq(u_terms, beta_r)); + + // sum_p 
p*l_{r,p} <= sum_p p*u_{r,p} + (K-1)*(1 - beta_r) + if k > 0 { + let mut order_terms = Vec::new(); + for p in 0..k { + order_terms.push((l_off + r * k + p, p as f64)); + order_terms.push((u_off + r * k + p, -(p as f64))); + } + constraints.push(LinearConstraint::le( + order_terms, + (k as f64 - 1.0).max(0.0) * (1.0 - beta_r), + )); + } + + for p in 0..k { + let h_idx = h_off + r * k + p; + let a_idx = a_off + r * k + p; + let f_idx = f_off + r * k + p; + + // h_{r,p} <= sum_{q=0}^{p} l_{r,q} + let mut h_le_l = vec![(h_idx, 1.0)]; + for q in 0..=p { + h_le_l.push((l_off + r * k + q, -1.0)); + } + constraints.push(LinearConstraint::le(h_le_l, 0.0)); + + // h_{r,p} <= sum_{q=p}^{K-1} u_{r,q} + let mut h_le_u = vec![(h_idx, 1.0)]; + for q in p..k { + h_le_u.push((u_off + r * k + q, -1.0)); + } + constraints.push(LinearConstraint::le(h_le_u, 0.0)); + + // h_{r,p} >= sum_{q=0}^{p} l_{r,q} + sum_{q=p}^{K-1} u_{r,q} - 1 + let mut h_ge_terms = vec![(h_idx, 1.0)]; + for q in 0..=p { + h_ge_terms.push((l_off + r * k + q, -1.0)); + } + for q in p..k { + h_ge_terms.push((u_off + r * k + q, -1.0)); + } + constraints.push(LinearConstraint::ge(h_ge_terms, -1.0)); + + // a_{r,p} <= h_{r,p} — every 1 must be inside the interval + constraints.push(LinearConstraint::le(vec![(a_idx, 1.0), (h_idx, -1.0)], 0.0)); + + // For C1P (no augmentation): the interval must exactly cover the 1s + // h_{r,p} <= a_{r,p} + f_{r,p} — position inside interval but 0 costs a flip + constraints.push(LinearConstraint::le( + vec![(h_idx, 1.0), (a_idx, -1.0), (f_idx, -1.0)], + 0.0, + )); + + // f_{r,p} <= h_{r,p} + constraints.push(LinearConstraint::le(vec![(f_idx, 1.0), (h_idx, -1.0)], 0.0)); + + // f_{r,p} + a_{r,p} <= 1 + constraints.push(LinearConstraint::le(vec![(f_idx, 1.0), (a_idx, 1.0)], 1.0)); + } + } + + // No augmentation allowed: sum f_{r,p} = 0 + // This is the key difference from COMA: C1P requires zero flips + let mut flip_terms = Vec::new(); + for r in 0..m { + for p in 0..k { + 
flip_terms.push((f_off + r * k + p, 1.0)); + } + } + if !flip_terms.is_empty() { + constraints.push(LinearConstraint::eq(flip_terms, 0.0)); + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionCOSToILP { + target, + num_cols: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "consecutiveonessubmatrix_to_ilp", + build: || { + // Tucker matrix (3x4), K=3 + let source = ConsecutiveOnesSubmatrix::new( + vec![ + vec![true, true, false, true], + vec![true, false, true, true], + vec![false, true, true, false], + ], + 3, + ); + let reduction: ReductionCOSToILP = ReduceTo::>::reduce_to(&source); + let ilp_solver = crate::solvers::ILPSolver::new(); + let target_config = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&target_config); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: extracted, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/consecutiveonessubmatrix_ilp.rs"] +mod tests; diff --git a/src/rules/disjointconnectingpaths_ilp.rs b/src/rules/disjointconnectingpaths_ilp.rs new file mode 100644 index 00000000..14985602 --- /dev/null +++ b/src/rules/disjointconnectingpaths_ilp.rs @@ -0,0 +1,202 @@ +//! Reduction from DisjointConnectingPaths to ILP. +//! +//! Binary flow variables `f^k_{e,dir}` per commodity per directed arc orientation. +//! Flow conservation, anti-parallel constraints, and vertex disjointness. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::DisjointConnectingPaths; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::SimpleGraph; + +/// Result of reducing DisjointConnectingPaths to ILP. 
+/// +/// Variable layout (all binary): +/// - `f^k_{e,dir}` for each commodity k and each directed orientation of each edge. +/// For edge index `e` with endpoints `(u,v)`, direction 0 is u->v and direction 1 is v->u. +/// Index: `k * 2m + 2e + dir` for k in 0..K, e in 0..m, dir in {0,1}. +/// +/// Total: `K * 2m` variables. +#[derive(Debug, Clone)] +pub struct ReductionDCPToILP { + target: ILP, + /// Canonical edge list used during construction. + edges: Vec<(usize, usize)>, + num_commodities: usize, + num_edge_vars_per_commodity: usize, +} + +impl ReductionResult for ReductionDCPToILP { + type Source = DisjointConnectingPaths; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // Mark an edge selected iff some orientation carries flow for some commodity. + let m = self.edges.len(); + let mut result = vec![0usize; m]; + for k in 0..self.num_commodities { + for e in 0..m { + let fwd = target_solution[k * self.num_edge_vars_per_commodity + 2 * e]; + let rev = target_solution[k * self.num_edge_vars_per_commodity + 2 * e + 1]; + if fwd == 1 || rev == 1 { + result[e] = 1; + } + } + } + result + } +} + +#[reduction( + overhead = { + num_vars = "num_pairs * 2 * num_edges", + num_constraints = "num_pairs * num_vertices + num_pairs * num_edges + num_edges + num_vertices", + } +)] +impl ReduceTo> for DisjointConnectingPaths { + type Result = ReductionDCPToILP; + + #[allow(clippy::needless_range_loop)] + fn reduce_to(&self) -> Self::Result { + let edges = self.ordered_edges(); + let m = edges.len(); + let n = self.num_vertices(); + let k_count = self.num_pairs(); + + // Variable layout: only flow variables, no MTZ ordering needed for binary flow + let num_flow_vars_per_k = 2 * m; // f^k_{e,dir} + let num_vars = k_count * num_flow_vars_per_k; + + let flow_var = + |k: usize, e: usize, dir: usize| -> usize { k * num_flow_vars_per_k + 2 * e + dir }; + + let mut constraints = 
Vec::new(); + + // Build adjacency index: for each vertex, which edges are incident + let mut vertex_edges: Vec> = vec![Vec::new(); n]; + for (e, &(u, v)) in edges.iter().enumerate() { + vertex_edges[u].push(e); + vertex_edges[v].push(e); + } + + // Identify terminal vertices + let mut is_terminal = vec![false; n]; + for &(s, t) in self.terminal_pairs() { + is_terminal[s] = true; + is_terminal[t] = true; + } + + for (k, &(s_k, t_k)) in self.terminal_pairs().iter().enumerate() { + // Flow conservation: outflow - inflow = demand at each vertex + for vertex in 0..n { + let mut terms = Vec::new(); + for &e in &vertex_edges[vertex] { + let (eu, _ev) = edges[e]; + if vertex == eu { + // vertex is first endpoint: dir=0 is outgoing, dir=1 is incoming + terms.push((flow_var(k, e, 0), 1.0)); + terms.push((flow_var(k, e, 1), -1.0)); + } else { + // vertex is second endpoint: dir=1 is outgoing, dir=0 is incoming + terms.push((flow_var(k, e, 1), 1.0)); + terms.push((flow_var(k, e, 0), -1.0)); + } + } + + let demand = if vertex == s_k { + 1.0 + } else if vertex == t_k { + -1.0 + } else { + 0.0 + }; + constraints.push(LinearConstraint::eq(terms, demand)); + } + + // Anti-parallel: f^k_{e,0} + f^k_{e,1} <= 1 for each edge + for e in 0..m { + constraints.push(LinearConstraint::le( + vec![(flow_var(k, e, 0), 1.0), (flow_var(k, e, 1), 1.0)], + 1.0, + )); + } + } + + // Edge disjointness: each edge is used by at most one commodity + // sum_k (f^k_{e,0} + f^k_{e,1}) <= 1 + for e in 0..m { + let mut terms = Vec::new(); + for k in 0..k_count { + terms.push((flow_var(k, e, 0), 1.0)); + terms.push((flow_var(k, e, 1), 1.0)); + } + constraints.push(LinearConstraint::le(terms, 1.0)); + } + + // Vertex disjointness: for each non-terminal vertex v, + // sum over all commodities k of (outgoing flow from v) <= 1 + for v in 0..n { + if is_terminal[v] { + continue; + } + let mut terms = Vec::new(); + for k in 0..k_count { + for &e in &vertex_edges[v] { + let (eu, _ev) = edges[e]; + if v == eu { + 
terms.push((flow_var(k, e, 0), 1.0)); + } else { + terms.push((flow_var(k, e, 1), 1.0)); + } + } + } + constraints.push(LinearConstraint::le(terms, 1.0)); + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionDCPToILP { + target, + edges, + num_commodities: k_count, + num_edge_vars_per_commodity: num_flow_vars_per_k, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::rules::ReduceTo as _; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "disjointconnectingpaths_to_ilp", + build: || { + // 6 vertices, two vertex-disjoint paths + let source = DisjointConnectingPaths::new( + SimpleGraph::new(6, vec![(0, 1), (1, 2), (3, 4), (4, 5)]), + vec![(0, 2), (3, 5)], + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/disjointconnectingpaths_ilp.rs"] +mod tests; diff --git a/src/rules/flowshopscheduling_ilp.rs b/src/rules/flowshopscheduling_ilp.rs new file mode 100644 index 00000000..32fb1a06 --- /dev/null +++ b/src/rules/flowshopscheduling_ilp.rs @@ -0,0 +1,235 @@ +//! Reduction from FlowShopScheduling to ILP. +//! +//! Binary order variables y_{i,j} with y_{i,j}=1 iff job i precedes job j, +//! integer completion-time variables C_{j,q} for each job j and machine q. +//! Machine-chain and big-M disjunctive constraints enforce a valid flow-shop +//! schedule; the deadline becomes a makespan bound. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::FlowShopScheduling; +use crate::reduction; +use crate::rules::ilp_helpers::permutation_to_lehmer; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing FlowShopScheduling to ILP. +/// +/// Variable layout: +/// - `y_{i,j}` for each ordered pair (i,j) with i, + num_jobs: usize, + num_machines: usize, + num_order_vars: usize, +} + +impl ReductionFSSToILP { + fn encode_schedule_as_lehmer(schedule: &[usize]) -> Vec { + let mut available: Vec = (0..schedule.len()).collect(); + let mut config = Vec::with_capacity(schedule.len()); + for &task in schedule { + let digit = available + .iter() + .position(|&c| c == task) + .expect("schedule must be a permutation"); + config.push(digit); + available.remove(digit); + } + config + } +} + +impl ReductionResult for ReductionFSSToILP { + type Source = FlowShopScheduling; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract solution: sort jobs by final-machine completion time C_{j,m-1}, + /// then convert permutation to Lehmer code. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_jobs; + let m = self.num_machines; + let c_offset = self.num_order_vars; + let mut jobs: Vec = (0..n).collect(); + jobs.sort_by_key(|&j| { + let idx = c_offset + j * m + (m - 1); + (target_solution.get(idx).copied().unwrap_or(0), j) + }); + let perm = permutation_to_lehmer(&jobs); + Self::encode_schedule_as_lehmer(&jobs) + .into_iter() + .zip(perm) + .map(|(lehmer, _)| lehmer) + .collect() + } +} + +#[reduction(overhead = { + num_vars = "num_jobs * (num_jobs - 1) / 2 + num_jobs * num_processors", + num_constraints = "num_jobs * (num_jobs - 1) / 2 + num_jobs + num_jobs * (num_processors - 1) + num_jobs * (num_jobs - 1) * num_processors + num_jobs", +})] +impl ReduceTo> for FlowShopScheduling { + type Result = ReductionFSSToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_jobs(); + let m = self.num_processors(); + + let num_order_vars = n * n.saturating_sub(1) / 2; + let num_completion_vars = n * m; + let num_vars = num_order_vars + num_completion_vars; + + // Order variable index for pair (i, j) with i < j + let order_var = |i: usize, j: usize| -> usize { + debug_assert!(i < j); + i * (2 * n - i - 1) / 2 + (j - i - 1) + }; + // Completion time variable index for job j, machine q + let c_var = |j: usize, q: usize| -> usize { num_order_vars + j * m + q }; + + let p = self.task_lengths(); + let d = self.deadline(); + + // Big-M: D + max processing time + let max_p = p + .iter() + .flat_map(|row| row.iter()) + .copied() + .max() + .unwrap_or(0); + let big_m = (d + max_p) as f64; + + let mut constraints = Vec::new(); + + // 1. Symmetry: y_{i,j} + y_{j,i} = 1 for all i != j + // Since we only store y_{i,j} for i < j, we enforce y_{i,j} in {0,1} + // via 0 <= y_{i,j} <= 1. 
+ for i in 0..n { + for j in (i + 1)..n { + constraints.push(LinearConstraint::le(vec![(order_var(i, j), 1.0)], 1.0)); + constraints.push(LinearConstraint::ge(vec![(order_var(i, j), 1.0)], 0.0)); + } + } + + // 2. C_{j,0} >= p_{j,0} for all j + for (j, p_j) in p.iter().enumerate() { + constraints.push(LinearConstraint::ge( + vec![(c_var(j, 0), 1.0)], + p_j[0] as f64, + )); + } + + // 3. Machine chain: C_{j,q+1} >= C_{j,q} + p_{j,q+1} for all j, q in 0..m-1 + for (j, p_j) in p.iter().enumerate() { + for q in 0..(m.saturating_sub(1)) { + // C_{j,q+1} - C_{j,q} >= p_{j,q+1} + constraints.push(LinearConstraint::ge( + vec![(c_var(j, q + 1), 1.0), (c_var(j, q), -1.0)], + p_j[q + 1] as f64, + )); + } + } + + // 4. Disjunctive: C_{j,q} >= C_{i,q} + p_{j,q} - M*(1 - y_{i,j}) for i != j, all q + // For i < j: y_{i,j} is the variable. + // C_{j,q} - C_{i,q} + M*y_{i,j} >= p_{j,q} + M ... wrong + // Actually: C_{j,q} >= C_{i,q} + p_{j,q} - M*(1 - y_{i,j}) + // => C_{j,q} - C_{i,q} + M*y_{i,j} >= p_{j,q} ... when y_{i,j}=0 (i NOT before j): inactive + // when y_{i,j}=1 (i before j): C_{j,q} >= C_{i,q} + p_{j,q} + // Wait, this needs reconsideration. The paper says: + // C_{j,q} >= C_{i,q} + p_{j,q} - M*(1 - y_{i,j}) + // => C_{j,q} - C_{i,q} - M*y_{i,j} >= p_{j,q} - M + // No let me expand directly: + // C_{j,q} - C_{i,q} + M*y_{i,j} >= p_{j,q} + M*(0)... hmm + // + // Let me re-derive: C_{j,q} >= C_{i,q} + p_{j,q} - M*(1 - y_{i,j}) + // = C_{j,q} - C_{i,q} + M*(1 - y_{i,j}) >= p_{j,q} + // = C_{j,q} - C_{i,q} + M - M*y_{i,j} >= p_{j,q} + // = C_{j,q} - C_{i,q} - M*y_{i,j} >= p_{j,q} - M + for i in 0..n { + for (j, p_j) in p.iter().enumerate() { + if i == j { + continue; + } + for (q, &p_jq) in p_j.iter().enumerate() { + if i < j { + // y_{i,j} is the variable. When y_{i,j} = 1, i precedes j, + // so C_{j,q} >= C_{i,q} + p_{j,q}. 
+ // C_{j,q} - C_{i,q} - M*y_{i,j} >= p_{j,q} - M + constraints.push(LinearConstraint::ge( + vec![ + (c_var(j, q), 1.0), + (c_var(i, q), -1.0), + (order_var(i, j), -big_m), + ], + p_jq as f64 - big_m, + )); + } else { + // i > j: y_{j,i} is stored. y_{i,j} = 1 - y_{j,i}. + // C_{j,q} >= C_{i,q} + p_{j,q} - M*(1 - (1 - y_{j,i})) + // C_{j,q} >= C_{i,q} + p_{j,q} - M*y_{j,i} + // C_{j,q} - C_{i,q} + M*y_{j,i} >= p_{j,q} + constraints.push(LinearConstraint::ge( + vec![ + (c_var(j, q), 1.0), + (c_var(i, q), -1.0), + (order_var(j, i), big_m), + ], + p_jq as f64, + )); + } + } + } + } + + // 5. Deadline: C_{j,m-1} <= D for all j + if m > 0 { + for j in 0..n { + constraints.push(LinearConstraint::le(vec![(c_var(j, m - 1), 1.0)], d as f64)); + } + } + + ReductionFSSToILP { + target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize), + num_jobs: n, + num_machines: m, + num_order_vars, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "flowshopscheduling_to_ilp", + build: || { + // 2 machines, 3 jobs, deadline 10 + let source = FlowShopScheduling::new(2, vec![vec![2, 3], vec![3, 2], vec![1, 4]], 10); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/flowshopscheduling_ilp.rs"] +mod tests; diff --git a/src/rules/hamiltonianpath_ilp.rs b/src/rules/hamiltonianpath_ilp.rs new file mode 100644 index 00000000..422323d6 --- /dev/null +++ b/src/rules/hamiltonianpath_ilp.rs @@ -0,0 +1,136 @@ +//! 
Reduction from HamiltonianPath to ILP (Integer Linear Programming). +//! +//! Position-assignment formulation: +//! - Binary x_{v,p}: vertex v at position p +//! - Binary z_{(u,v),p,dir}: linearized product for edge (u,v) at consecutive positions +//! - Assignment: each vertex in exactly one position, each position exactly one vertex +//! - Adjacency: exactly one graph edge between consecutive positions + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::HamiltonianPath; +use crate::reduction; +use crate::rules::ilp_helpers::{ + mccormick_product, one_hot_assignment_constraints, one_hot_decode, +}; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing HamiltonianPath to ILP. +/// +/// Variable layout (all binary): +/// - `x_{v,p}` at index `v * n + p` for `v, p in 0..n` +/// - `z_{e,p,dir}` at index `n^2 + 2*(e*n_pos + p) + dir` for edge `e`, position `p`, +/// direction `dir in {0=forward, 1=reverse}` +#[derive(Debug, Clone)] +pub struct ReductionHamiltonianPathToILP { + target: ILP, + num_vertices: usize, +} + +impl ReductionResult for ReductionHamiltonianPathToILP { + type Source = HamiltonianPath; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + one_hot_decode(target_solution, self.num_vertices, self.num_vertices, 0) + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices^2 + 2 * num_edges * num_vertices", + num_constraints = "2 * num_vertices + 6 * num_edges * num_vertices + num_vertices", + } +)] +impl ReduceTo> for HamiltonianPath { + type Result = ReductionHamiltonianPathToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let graph = self.graph(); + let edges = graph.edges(); + let m = edges.len(); + let n_pos = if n == 0 { 0 } else { n - 1 }; // number of consecutive-position pairs + + let num_x = n * 
n; + let num_z = 2 * m * n_pos; + let num_vars = num_x + num_z; + + let x_idx = |v: usize, p: usize| -> usize { v * n + p }; + let z_fwd_idx = |e: usize, p: usize| -> usize { num_x + 2 * (e * n_pos + p) }; + let z_rev_idx = |e: usize, p: usize| -> usize { num_x + 2 * (e * n_pos + p) + 1 }; + + let mut constraints = Vec::new(); + + // Assignment: one-hot for vertices and positions + constraints.extend(one_hot_assignment_constraints(n, n, 0)); + + // McCormick linearization for both directions + for (e, &(u, v)) in edges.iter().enumerate() { + for p in 0..n_pos { + // Forward: z_fwd = x_{u,p} * x_{v,p+1} + constraints.extend(mccormick_product( + z_fwd_idx(e, p), + x_idx(u, p), + x_idx(v, p + 1), + )); + // Reverse: z_rev = x_{v,p} * x_{u,p+1} + constraints.extend(mccormick_product( + z_rev_idx(e, p), + x_idx(v, p), + x_idx(u, p + 1), + )); + } + } + + // Adjacency: for each consecutive position pair p, exactly one edge + for p in 0..n_pos { + let mut terms = Vec::new(); + for e in 0..m { + terms.push((z_fwd_idx(e, p), 1.0)); + terms.push((z_rev_idx(e, p), 1.0)); + } + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Feasibility: no objective + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionHamiltonianPathToILP { + target, + num_vertices: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "hamiltonianpath_to_ilp", + build: || { + // Path graph: 0-1-2-3 (has Hamiltonian path) + let source = HamiltonianPath::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + 
crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/hamiltonianpath_ilp.rs"] +mod tests; diff --git a/src/rules/ilp_helpers.rs b/src/rules/ilp_helpers.rs index 826b31f7..516b1158 100644 --- a/src/rules/ilp_helpers.rs +++ b/src/rules/ilp_helpers.rs @@ -4,6 +4,8 @@ //! McCormick products, MTZ orderings, flow conservation, big-M activation, //! absolute-value differentials, minimax bounds, and one-hot decoding. +#![allow(dead_code)] + use crate::models::algebraic::LinearConstraint; /// McCormick linearization: `y = x_a * x_b` (both binary). @@ -67,6 +69,7 @@ pub fn mtz_ordering( /// For each node `u`: `Σ_{(u,v)} f_{uv} - Σ_{(v,u)} f_{vu} = demand[u]`. /// /// `flow_idx` maps an arc index to the ILP variable index for that arc's flow. +#[allow(clippy::needless_range_loop)] pub fn flow_conservation( arcs: &[(usize, usize)], num_nodes: usize, @@ -110,7 +113,10 @@ pub fn abs_diff_le(a_idx: usize, b_idx: usize, z_idx: usize) -> Vec]) -> Vec { +pub fn minimax_constraints( + z_idx: usize, + expr_terms: &[Vec<(usize, f64)>], +) -> Vec { expr_terms .iter() .map(|terms| { diff --git a/src/rules/integralflowhomologousarcs_ilp.rs b/src/rules/integralflowhomologousarcs_ilp.rs new file mode 100644 index 00000000..3d8f41f0 --- /dev/null +++ b/src/rules/integralflowhomologousarcs_ilp.rs @@ -0,0 +1,120 @@ +//! Reduction from IntegralFlowHomologousArcs to ILP. +//! +//! One integer flow variable per arc. Capacity bounds, conservation at +//! non-terminals, homologous-pair equality, and sink inflow requirement. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::IntegralFlowHomologousArcs; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing IntegralFlowHomologousArcs to ILP. 
+#[derive(Debug, Clone)] +pub struct ReductionIFHAToILP { + target: ILP, +} + +impl ReductionResult for ReductionIFHAToILP { + type Source = IntegralFlowHomologousArcs; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_arcs", + num_constraints = "num_arcs + num_vertices - 2 + 1", + } +)] +impl ReduceTo> for IntegralFlowHomologousArcs { + type Result = ReductionIFHAToILP; + + fn reduce_to(&self) -> Self::Result { + let arcs = self.graph().arcs(); + let num_arcs = self.num_arcs(); + let num_vertices = self.num_vertices(); + let mut constraints = Vec::new(); + + // Capacity: f_a <= c_a for each arc + for (arc_idx, &capacity) in self.capacities().iter().enumerate() { + constraints.push(LinearConstraint::le(vec![(arc_idx, 1.0)], capacity as f64)); + } + + // Conservation: sum_{a in delta^-(v)} f_a = sum_{a in delta^+(v)} f_a + // for all v in V \ {s, t} + for vertex in 0..num_vertices { + if vertex == self.source() || vertex == self.sink() { + continue; + } + let mut terms = Vec::new(); + for (arc_idx, &(u, v)) in arcs.iter().enumerate() { + if v == vertex { + terms.push((arc_idx, 1.0)); // incoming + } + if u == vertex { + terms.push((arc_idx, -1.0)); // outgoing + } + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // Homologous equality: f_a = f_b for each pair (a, b) + for &(a, b) in self.homologous_pairs() { + constraints.push(LinearConstraint::eq(vec![(a, 1.0), (b, -1.0)], 0.0)); + } + + // Sink inflow requirement: sum_{a in delta^-(t)} f_a - sum_{a in delta^+(t)} f_a >= R + let mut sink_terms = Vec::new(); + for (arc_idx, &(u, v)) in arcs.iter().enumerate() { + if v == self.sink() { + sink_terms.push((arc_idx, 1.0)); // incoming + } + if u == self.sink() { + sink_terms.push((arc_idx, -1.0)); // outgoing + } + } + constraints.push(LinearConstraint::ge(sink_terms, 
self.requirement() as f64)); + + ReductionIFHAToILP { + target: ILP::new(num_arcs, constraints, vec![], ObjectiveSense::Minimize), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::topology::DirectedGraph; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "integralflowhomologousarcs_to_ilp", + build: || { + let source = IntegralFlowHomologousArcs::new( + DirectedGraph::new(4, vec![(0, 1), (0, 2), (1, 3), (2, 3)]), + vec![2, 2, 2, 2], + 0, + 3, + 2, + vec![(0, 1)], + ); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![1, 1, 1, 1], + target_config: vec![1, 1, 1, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/integralflowhomologousarcs_ilp.rs"] +mod tests; diff --git a/src/rules/integralflowwithmultipliers_ilp.rs b/src/rules/integralflowwithmultipliers_ilp.rs new file mode 100644 index 00000000..04f849cb --- /dev/null +++ b/src/rules/integralflowwithmultipliers_ilp.rs @@ -0,0 +1,119 @@ +//! Reduction from IntegralFlowWithMultipliers to ILP. +//! +//! One integer flow variable per arc. Capacity bounds, multiplier-scaled +//! conservation at non-terminals, and sink inflow requirement. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::IntegralFlowWithMultipliers; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing IntegralFlowWithMultipliers to ILP. 
+#[derive(Debug, Clone)] +pub struct ReductionIFWMToILP { + target: ILP, +} + +impl ReductionResult for ReductionIFWMToILP { + type Source = IntegralFlowWithMultipliers; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_arcs", + num_constraints = "num_arcs + num_vertices - 1", + } +)] +impl ReduceTo> for IntegralFlowWithMultipliers { + type Result = ReductionIFWMToILP; + + fn reduce_to(&self) -> Self::Result { + let arcs = self.graph().arcs(); + let num_arcs = self.num_arcs(); + let num_vertices = self.num_vertices(); + let mut constraints = Vec::new(); + + // Capacity: f_a <= c_a for each arc + for (arc_idx, &capacity) in self.capacities().iter().enumerate() { + constraints.push(LinearConstraint::le(vec![(arc_idx, 1.0)], capacity as f64)); + } + + // Multiplier-scaled conservation: + // sum_{a in delta^+(v)} f_a = h(v) * sum_{a in delta^-(v)} f_a + // for all v in V \ {s, t} + // Rewrite: sum_{a in delta^+(v)} f_a - h(v) * sum_{a in delta^-(v)} f_a = 0 + for vertex in 0..num_vertices { + if vertex == self.source() || vertex == self.sink() { + continue; + } + let multiplier = self.multipliers()[vertex] as f64; + let mut terms = Vec::new(); + for (arc_idx, &(u, v)) in arcs.iter().enumerate() { + if u == vertex { + terms.push((arc_idx, 1.0)); // outgoing + } + if v == vertex { + terms.push((arc_idx, -multiplier)); // incoming scaled by -h(v) + } + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // Sink inflow requirement: sum_{a in delta^-(t)} f_a - sum_{a in delta^+(t)} f_a >= R + let mut sink_terms = Vec::new(); + for (arc_idx, &(u, v)) in arcs.iter().enumerate() { + if v == self.sink() { + sink_terms.push((arc_idx, 1.0)); // incoming + } + if u == self.sink() { + sink_terms.push((arc_idx, -1.0)); // outgoing + } + } + constraints.push(LinearConstraint::ge(sink_terms, 
self.requirement() as f64)); + + ReductionIFWMToILP { + target: ILP::new(num_arcs, constraints, vec![], ObjectiveSense::Minimize), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::topology::DirectedGraph; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "integralflowwithmultipliers_to_ilp", + build: || { + // Simple diamond: s=0, t=3, intermediate vertices 1,2 with multiplier 1 + let source = IntegralFlowWithMultipliers::new( + DirectedGraph::new(4, vec![(0, 1), (0, 2), (1, 3), (2, 3)]), + 0, + 3, + vec![1, 1, 1, 1], // source/sink entries ignored + vec![2, 2, 2, 2], + 2, + ); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![1, 1, 1, 1], + target_config: vec![1, 1, 1, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/integralflowwithmultipliers_ilp.rs"] +mod tests; diff --git a/src/rules/isomorphicspanningtree_ilp.rs b/src/rules/isomorphicspanningtree_ilp.rs new file mode 100644 index 00000000..9eefbe6a --- /dev/null +++ b/src/rules/isomorphicspanningtree_ilp.rs @@ -0,0 +1,116 @@ +//! Reduction from IsomorphicSpanningTree to ILP (Integer Linear Programming). +//! +//! Binary variable x_{u,v} with x_{u,v} = 1 iff tree vertex u maps to graph +//! vertex v. Bijection constraints plus non-edge exclusion for every tree edge. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::IsomorphicSpanningTree; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::Graph; + +#[derive(Debug, Clone)] +pub struct ReductionISTToILP { + target: ILP, + n: usize, +} + +impl ReductionResult for ReductionISTToILP { + type Source = IsomorphicSpanningTree; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// For each tree vertex u, output the unique graph vertex v with x_{u,v} = 1. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.n; + (0..n) + .map(|u| { + (0..n) + .find(|&v| target_solution[u * n + v] == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices * num_vertices", + num_constraints = "2 * num_vertices + 2 * num_tree_edges * num_vertices * num_vertices", + } +)] +impl ReduceTo> for IsomorphicSpanningTree { + type Result = ReductionISTToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let num_vars = n * n; + + let mut constraints = Vec::new(); + + // Each tree vertex u maps to exactly one graph vertex: + // Σ_v x_{u,v} = 1 ∀ u + for u in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|v| (u * n + v, 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Each graph vertex v is mapped to by exactly one tree vertex: + // Σ_u x_{u,v} = 1 ∀ v + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|u| (u * n + v, 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // For each tree edge {u, w} and each pair (v, z) that is NOT a graph edge: + // x_{u,v} + x_{w,z} <= 1 + // x_{u,z} + x_{w,v} <= 1 + for (u, w) in self.tree().edges() { + for v in 0..n { + for z in 0..n { + if v != z && !self.graph().has_edge(v, z) { + constraints.push(LinearConstraint::le( + vec![(u * n + v, 1.0), (w * n + z, 1.0)], + 1.0, + )); 
+ } + } + } + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionISTToILP { target, n } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::topology::SimpleGraph; + vec![crate::example_db::specs::RuleExampleSpec { + id: "isomorphicspanningtree_to_ilp", + build: || { + // K4 graph, star tree + let source = IsomorphicSpanningTree::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]), + SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]), + ); + // Identity bijection works + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![0, 1, 2, 3], + // x_{0,0}=1, x_{1,1}=1, x_{2,2}=1, x_{3,3}=1 + target_config: vec![1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/isomorphicspanningtree_ilp.rs"] +mod tests; diff --git a/src/rules/lengthboundeddisjointpaths_ilp.rs b/src/rules/lengthboundeddisjointpaths_ilp.rs new file mode 100644 index 00000000..76591836 --- /dev/null +++ b/src/rules/lengthboundeddisjointpaths_ilp.rs @@ -0,0 +1,226 @@ +//! Reduction from LengthBoundedDisjointPaths to ILP. +//! +//! Binary flow variables per commodity per directed edge orientation. +//! Conservation, edge/vertex disjointness, and length bound. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::LengthBoundedDisjointPaths; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing LengthBoundedDisjointPaths to ILP. +/// +/// Variable layout (all binary): +/// - Flow: `f^k_{e,dir}` at index `k * 2m + 2e + dir` +/// +/// Total: `J * 2m` variables. +#[derive(Debug, Clone)] +pub struct ReductionLBDPToILP { + target: ILP, + /// Canonical sorted edges. 
+ edges: Vec<(usize, usize)>, + num_vertices: usize, + num_paths: usize, +} + +impl ReductionResult for ReductionLBDPToILP { + type Source = LengthBoundedDisjointPaths; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // For each path slot k, set the source vertex-indicator block to 1 + // exactly on the vertices incident to the commodity-k path, including s and t. + let m = self.edges.len(); + let n = self.num_vertices; + let j = self.num_paths; + let flow_vars_per_k = 2 * m; + + let mut result = vec![0usize; j * n]; + for k in 0..j { + // Find which vertices are on the path for commodity k + let mut on_path = vec![false; n]; + for e in 0..m { + let (u, v) = self.edges[e]; + let fwd = target_solution[k * flow_vars_per_k + 2 * e]; + let rev = target_solution[k * flow_vars_per_k + 2 * e + 1]; + if fwd == 1 { + on_path[u] = true; + on_path[v] = true; + } + if rev == 1 { + on_path[u] = true; + on_path[v] = true; + } + } + for v in 0..n { + if on_path[v] { + result[k * n + v] = 1; + } + } + } + result + } +} + +#[reduction( + overhead = { + num_vars = "num_paths_required * 2 * num_edges", + num_constraints = "num_paths_required * num_vertices + num_paths_required * num_edges + num_paths_required + num_edges + num_vertices", + } +)] +impl ReduceTo> for LengthBoundedDisjointPaths { + type Result = ReductionLBDPToILP; + + #[allow(clippy::needless_range_loop)] + fn reduce_to(&self) -> Self::Result { + let mut edges: Vec<(usize, usize)> = self + .graph() + .edges() + .into_iter() + .map(|(u, v)| if u <= v { (u, v) } else { (v, u) }) + .collect(); + edges.sort_unstable(); + + let m = edges.len(); + let n = self.num_vertices(); + let j = self.num_paths_required(); + let max_len = self.max_length(); + let s = self.source(); + let t = self.sink(); + + // Only flow variables, no MTZ ordering needed + let flow_vars_per_k = 2 * m; + let num_vars = j * flow_vars_per_k; + + let 
flow_var = |k: usize, e: usize, dir: usize| k * flow_vars_per_k + 2 * e + dir; + + // Build vertex-to-edge adjacency + let mut vertex_edges: Vec> = vec![Vec::new(); n]; + for (e, &(u, v)) in edges.iter().enumerate() { + vertex_edges[u].push(e); + vertex_edges[v].push(e); + } + + let mut constraints = Vec::new(); + + for k in 0..j { + // Flow conservation + for vertex in 0..n { + let mut terms = Vec::new(); + for &e in &vertex_edges[vertex] { + let (eu, _) = edges[e]; + if vertex == eu { + terms.push((flow_var(k, e, 0), 1.0)); // outgoing + terms.push((flow_var(k, e, 1), -1.0)); // incoming + } else { + terms.push((flow_var(k, e, 1), 1.0)); // outgoing + terms.push((flow_var(k, e, 0), -1.0)); // incoming + } + } + let demand = if vertex == s { + 1.0 + } else if vertex == t { + -1.0 + } else { + 0.0 + }; + constraints.push(LinearConstraint::eq(terms, demand)); + } + + // Anti-parallel + for e in 0..m { + constraints.push(LinearConstraint::le( + vec![(flow_var(k, e, 0), 1.0), (flow_var(k, e, 1), 1.0)], + 1.0, + )); + } + + // Length bound: total flow for commodity k <= max_length + let mut len_terms = Vec::new(); + for e in 0..m { + len_terms.push((flow_var(k, e, 0), 1.0)); + len_terms.push((flow_var(k, e, 1), 1.0)); + } + constraints.push(LinearConstraint::le(len_terms, max_len as f64)); + } + + // Edge disjointness: each edge used by at most one commodity + for e in 0..m { + let mut terms = Vec::new(); + for k in 0..j { + terms.push((flow_var(k, e, 0), 1.0)); + terms.push((flow_var(k, e, 1), 1.0)); + } + constraints.push(LinearConstraint::le(terms, 1.0)); + } + + // Vertex disjointness for non-terminal vertices + for v in 0..n { + if v == s || v == t { + continue; + } + let mut terms = Vec::new(); + for k in 0..j { + for &e in &vertex_edges[v] { + let (eu, _) = edges[e]; + if v == eu { + terms.push((flow_var(k, e, 0), 1.0)); + } else { + terms.push((flow_var(k, e, 1), 1.0)); + } + } + } + constraints.push(LinearConstraint::le(terms, 1.0)); + } + + let target = 
ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionLBDPToILP { + target, + edges, + num_vertices: n, + num_paths: j, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::rules::ReduceTo as _; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "lengthboundeddisjointpaths_to_ilp", + build: || { + // 4-vertex diamond: s=0, t=3, J=2, K=2 + let source = LengthBoundedDisjointPaths::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (1, 3), (2, 3)]), + 0, + 3, + 2, + 2, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/lengthboundeddisjointpaths_ilp.rs"] +mod tests; diff --git a/src/rules/longestcircuit_ilp.rs b/src/rules/longestcircuit_ilp.rs new file mode 100644 index 00000000..cacd0824 --- /dev/null +++ b/src/rules/longestcircuit_ilp.rs @@ -0,0 +1,184 @@ +//! Reduction from LongestCircuit to ILP (Integer Linear Programming). +//! +//! Direct cycle-selection formulation: +//! - Binary y_e for edge selection +//! - Binary s_v for vertex on circuit +//! - Degree: sum_{e : v in e} y_e = 2 s_v +//! - At least 3 edges selected +//! - Length bound: sum l_e y_e >= K +//! - Multi-commodity flow connectivity + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::LongestCircuit; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing LongestCircuit to ILP. 
+/// +/// Variable layout (all binary): +/// - `y_e` for edge e, indices `0..m` +/// - `s_v` for vertex v, indices `m..m+n` +/// - `f^t_{e,dir}` flow for commodity t, indices `m+n..m+n+2m*(n-1)` +#[derive(Debug, Clone)] +pub struct ReductionLongestCircuitToILP { + target: ILP, + num_edges: usize, +} + +impl ReductionResult for ReductionLongestCircuitToILP { + type Source = LongestCircuit; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: output the binary edge-selection vector (y_e). + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_edges].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "3 * num_edges + num_vertices + 2 * num_edges * num_vertices", + num_constraints = "num_vertices + 2 + num_vertices^2 + 2 * num_edges * num_vertices", + } +)] +impl ReduceTo> for LongestCircuit { + type Result = ReductionLongestCircuitToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let m = self.num_edges(); + let edges = self.graph().edges(); + let lengths = self.edge_lengths(); + let bound = *self.bound(); + + let y_idx = |e: usize| -> usize { e }; + let s_idx = |v: usize| -> usize { m + v }; + + // Multi-commodity flow for connectivity + let num_commodities = n.saturating_sub(1); + let num_flow = 2 * m * num_commodities; + let num_vars = m + n + num_flow; + + let flow_idx = |commodity: usize, edge: usize, dir: usize| -> usize { + m + n + commodity * 2 * m + 2 * edge + dir + }; + + let mut constraints = Vec::new(); + + // Degree constraints: sum_{e : v in e} y_e = 2 s_v for all v + for v in 0..n { + let mut terms: Vec<(usize, f64)> = Vec::new(); + for (e, &(u, w)) in edges.iter().enumerate() { + if u == v || w == v { + terms.push((y_idx(e), 1.0)); + } + } + terms.push((s_idx(v), -2.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // At least 3 edges selected + let all_edge_terms: Vec<(usize, f64)> = (0..m).map(|e| (y_idx(e), 
1.0)).collect(); + constraints.push(LinearConstraint::ge(all_edge_terms, 3.0)); + + // Length bound: sum l_e y_e >= K + let length_terms: Vec<(usize, f64)> = lengths + .iter() + .enumerate() + .map(|(e, &l)| (y_idx(e), l as f64)) + .collect(); + constraints.push(LinearConstraint::ge(length_terms, bound as f64)); + + // Multi-commodity flow for connectivity + // Root = vertex 0. For each non-root vertex t (commodity index = t-1): + for t in 1..n { + let commodity = t - 1; + + // Flow conservation at each vertex v + for v in 0..n { + let mut terms = Vec::new(); + for (e, &(u, w)) in edges.iter().enumerate() { + // Forward dir: u->w, reverse dir: w->u + if u == v { + terms.push((flow_idx(commodity, e, 0), 1.0)); // outgoing + terms.push((flow_idx(commodity, e, 1), -1.0)); // incoming + } + if w == v { + terms.push((flow_idx(commodity, e, 0), -1.0)); // incoming + terms.push((flow_idx(commodity, e, 1), 1.0)); // outgoing + } + } + + if v == 0 { + // Root: outflow - inflow = s_t + terms.push((s_idx(t), -1.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } else if v == t { + // Target: outflow - inflow = -s_t + terms.push((s_idx(t), 1.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } else { + // Transit: outflow - inflow = 0 + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + } + + // Capacity: f^t_{e,dir} <= y_e + for e in 0..m { + constraints.push(LinearConstraint::le( + vec![(flow_idx(commodity, e, 0), 1.0), (y_idx(e), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(flow_idx(commodity, e, 1), 1.0), (y_idx(e), -1.0)], + 0.0, + )); + } + } + + // Feasibility: no objective + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionLongestCircuitToILP { + target, + num_edges: m, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: 
"longestcircuit_to_ilp", + build: || { + // Triangle with unit lengths, bound 3 + let source = LongestCircuit::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1, 1, 1], + 3, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/longestcircuit_ilp.rs"] +mod tests; diff --git a/src/rules/minimumcutintoboundedsets_ilp.rs b/src/rules/minimumcutintoboundedsets_ilp.rs new file mode 100644 index 00000000..e4196650 --- /dev/null +++ b/src/rules/minimumcutintoboundedsets_ilp.rs @@ -0,0 +1,126 @@ +//! Reduction from MinimumCutIntoBoundedSets to ILP. +//! +//! Binary x_v (1 iff v on sink side), binary y_e (cut indicator). +//! Source pinned to 0, sink pinned to 1. +//! Size bounds: Σ x_v ≤ B, Σ (1-x_v) ≤ B. +//! Cut linking: y_e ≥ x_u - x_v, y_e ≥ x_v - x_u for each edge {u,v}. +//! Cut bound: Σ w_e y_e ≤ K. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::MinimumCutIntoBoundedSets; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +#[derive(Debug, Clone)] +pub struct ReductionMinCutBSToILP { + target: ILP, + num_vertices: usize, +} + +impl ReductionResult for ReductionMinCutBSToILP { + type Source = MinimumCutIntoBoundedSets; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_vertices].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices + num_edges", + num_constraints = "2 + 2 * num_edges + 1", + } +)] +impl ReduceTo> for MinimumCutIntoBoundedSets { + type Result = ReductionMinCutBSToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let edges = self.graph().edges(); + let m = edges.len(); + let num_vars = n + m; + let mut constraints = Vec::new(); + + // x_s = 0 + constraints.push(LinearConstraint::eq(vec![(self.source(), 1.0)], 0.0)); + + // x_t = 1 + constraints.push(LinearConstraint::eq(vec![(self.sink(), 1.0)], 1.0)); + + // Σ x_v ≤ B (sink side count) + let all_terms: Vec<(usize, f64)> = (0..n).map(|v| (v, 1.0)).collect(); + constraints.push(LinearConstraint::le(all_terms, self.size_bound() as f64)); + + // Σ (1 - x_v) ≤ B ⟹ n - Σ x_v ≤ B ⟹ -Σ x_v ≤ B - n ⟹ Σ x_v ≥ n - B + let all_terms2: Vec<(usize, f64)> = (0..n).map(|v| (v, 1.0)).collect(); + constraints.push(LinearConstraint::ge( + all_terms2, + (n as f64) - (self.size_bound() as f64), + )); + + // Cut linking: for each edge e = {u, v}, y_e ≥ x_u - x_v and y_e ≥ x_v - x_u + for (e_idx, &(u, v)) in edges.iter().enumerate() { + let y = n + e_idx; + // y_e - x_u + x_v ≥ 0 (y_e ≥ x_u - x_v) + constraints.push(LinearConstraint::ge( + vec![(y, 1.0), (u, -1.0), (v, 1.0)], + 0.0, + )); + // y_e + x_u - x_v ≥ 0 (y_e ≥ 
x_v - x_u) + constraints.push(LinearConstraint::ge( + vec![(y, 1.0), (u, 1.0), (v, -1.0)], + 0.0, + )); + } + + // Cut bound: Σ w_e y_e ≤ K + let cut_terms: Vec<(usize, f64)> = self + .edge_weights() + .iter() + .enumerate() + .map(|(e_idx, &w)| (n + e_idx, w as f64)) + .collect(); + constraints.push(LinearConstraint::le(cut_terms, *self.cut_bound() as f64)); + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionMinCutBSToILP { + target, + num_vertices: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumcutintoboundedsets_to_ilp", + build: || { + let source = MinimumCutIntoBoundedSets::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1, 1, 1], + 0, + 3, + 3, + 2, + ); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![0, 0, 1, 1], + target_config: vec![0, 0, 1, 1, 0, 1, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumcutintoboundedsets_ilp.rs"] +mod tests; diff --git a/src/rules/minimumtardinesssequencing_ilp.rs b/src/rules/minimumtardinesssequencing_ilp.rs new file mode 100644 index 00000000..8d55c208 --- /dev/null +++ b/src/rules/minimumtardinesssequencing_ilp.rs @@ -0,0 +1,130 @@ +//! Reduction from MinimumTardinessSequencing to ILP. +//! +//! Position-assignment ILP: binary x_{j,p} placing task j in position p, +//! with binary tardy indicator u_j. Precedence constraints and a +//! deadline-based tardy indicator with big-M = n. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::MinimumTardinessSequencing; +use crate::reduction; +use crate::rules::ilp_helpers::{one_hot_decode, permutation_to_lehmer}; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing MinimumTardinessSequencing to ILP. 
+/// +/// Variable layout: +/// - x_{j,p} for j in 0..n, p in 0..n: index `j*n + p` +/// - u_j for j in 0..n: index `n*n + j` +/// +/// Total: n^2 + n variables. +#[derive(Debug, Clone)] +pub struct ReductionMTSToILP { + target: ILP, + num_tasks: usize, +} + +impl ReductionResult for ReductionMTSToILP { + type Source = MinimumTardinessSequencing; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: decode position assignment x_{j,p} → permutation → Lehmer code. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_tasks; + // Decode: for each position p, find which job j has x_{j,p}=1 + let schedule = one_hot_decode(target_solution, n, n, 0); + permutation_to_lehmer(&schedule) + } +} + +#[reduction(overhead = { + num_vars = "num_tasks * num_tasks + num_tasks", + num_constraints = "2 * num_tasks + num_precedences + num_tasks", +})] +impl ReduceTo> for MinimumTardinessSequencing { + type Result = ReductionMTSToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + let num_x_vars = n * n; + let num_u_vars = n; + let num_vars = num_x_vars + num_u_vars; + let big_m = n as f64; + + let x_var = |j: usize, p: usize| -> usize { j * n + p }; + let u_var = |j: usize| -> usize { num_x_vars + j }; + + let mut constraints = Vec::new(); + + // 1. Each task assigned to exactly one position: Σ_p x_{j,p} = 1 for all j + for j in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|p| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2. Each position has exactly one task: Σ_j x_{j,p} = 1 for all p + for p in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|j| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 3. 
Precedence: Σ_p p*x_{i,p} + 1 <= Σ_p p*x_{j,p} for each (i,j) + // => Σ_p p*x_{j,p} - Σ_p p*x_{i,p} >= 1 + for &(i, j) in self.precedences() { + let mut terms: Vec<(usize, f64)> = Vec::new(); + for p in 0..n { + terms.push((x_var(j, p), p as f64)); + terms.push((x_var(i, p), -(p as f64))); + } + constraints.push(LinearConstraint::ge(terms, 1.0)); + } + + // 4. Tardy indicator: Σ_p (p+1)*x_{j,p} - d_j <= M*u_j for all j + // => Σ_p (p+1)*x_{j,p} - M*u_j <= d_j + for j in 0..n { + let mut terms: Vec<(usize, f64)> = + (0..n).map(|p| (x_var(j, p), (p + 1) as f64)).collect(); + terms.push((u_var(j), -big_m)); + constraints.push(LinearConstraint::le(terms, self.deadlines()[j] as f64)); + } + + // Objective: minimize Σ_j u_j + let objective: Vec<(usize, f64)> = (0..n).map(|j| (u_var(j), 1.0)).collect(); + + ReductionMTSToILP { + target: ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize), + num_tasks: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumtardinesssequencing_to_ilp", + build: || { + let source = MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![(0, 2)]); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumtardinesssequencing_ilp.rs"] +mod tests; diff --git a/src/rules/mixedchinesepostman_ilp.rs b/src/rules/mixedchinesepostman_ilp.rs new file mode 100644 index 00000000..41300d90 --- /dev/null +++ b/src/rules/mixedchinesepostman_ilp.rs @@ -0,0 +1,405 @@ +//! 
Reduction from MixedChinesePostman to ILP. +//! +//! Choose an orientation for every undirected edge, then add integer traversal +//! variables on available directed arcs to balance the oriented multigraph +//! within the length bound. Uses connectivity flow constraints on both +//! forward and reverse directions. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::MixedChinesePostman; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::types::WeightElement; + +/// Result of reducing MixedChinesePostman to ILP. +#[derive(Debug, Clone)] +pub struct ReductionMCPToILP { + target: ILP, + num_undirected_edges: usize, +} + +impl ReductionResult for ReductionMCPToILP { + type Source = MixedChinesePostman; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // Return the orientation bits d_k in source edge order + target_solution[..self.num_undirected_edges].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_edges + 4 * (num_arcs + 2 * num_edges) + 3 * num_vertices + 1", + num_constraints = "num_vertices + 2 * (num_arcs + 2 * num_edges) + 2 * (num_arcs + 2 * num_edges) + num_vertices + 1 + num_vertices + 4 * num_vertices + 2 * (num_arcs + 2 * num_edges) + 2 * num_vertices + 1", + } +)] +impl ReduceTo> for MixedChinesePostman { + type Result = ReductionMCPToILP; + + #[allow(clippy::needless_range_loop)] + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let m = self.num_arcs(); // original directed arcs + let q = self.num_edges(); // undirected edges + let r_count = m + q; // required traversals + + // If R = 0, empty walk is feasible + if r_count == 0 { + return ReductionMCPToILP { + target: ILP::new(0, vec![], vec![], ObjectiveSense::Minimize), + num_undirected_edges: 0, + }; + } + + // Available arc list A*: L = m + 2q arcs + // b_i = a_i for 0 <= i < m + 
// b_{m+2k} = (u_k, v_k), b_{m+2k+1} = (v_k, u_k) + let original_arcs = self.graph().arcs(); + let undirected_edges = self.graph().edges(); + + let l = m + 2 * q; // total available arcs + + // Build available arc list with lengths + let mut avail_arcs: Vec<(usize, usize)> = Vec::with_capacity(l); + let mut avail_lengths: Vec = Vec::with_capacity(l); + + for (i, &(u, v)) in original_arcs.iter().enumerate() { + avail_arcs.push((u, v)); + avail_lengths.push(self.arc_weights()[i].to_sum() as f64); + } + for (k, &(u, v)) in undirected_edges.iter().enumerate() { + avail_arcs.push((u, v)); // forward + avail_lengths.push(self.edge_weights()[k].to_sum() as f64); + avail_arcs.push((v, u)); // reverse + avail_lengths.push(self.edge_weights()[k].to_sum() as f64); + } + + // Variable layout (from paper): + // d_k: index k (0..q) -- orientation bit + // g_j: index q + j (0..L) -- extra traversals + // y_j: index q + L + j -- binary use indicator + // z_v: index q + 2L + v -- binary vertex activity + // rho_v: index q + 2L + n + v -- root selector + // s: index q + 2L + 2n -- count of active vertices + // b_v: index q + 2L + 2n + 1 + v -- product s*rho_v + // f_j: index q + 2L + 3n + 1 + j -- forward connectivity flow + // h_j: index q + 3L + 3n + 1 + j -- reverse connectivity flow + + let d_idx = |k: usize| k; + let g_idx = |j: usize| q + j; + let y_idx = |j: usize| q + l + j; + let z_idx = |v: usize| q + 2 * l + v; + let rho_idx = |v: usize| q + 2 * l + n + v; + let s_idx = q + 2 * l + 2 * n; + let b_idx = |v: usize| q + 2 * l + 2 * n + 1 + v; + let f_idx = |j: usize| q + 2 * l + 3 * n + 1 + j; + let h_idx = |j: usize| q + 3 * l + 3 * n + 1 + j; + + let num_vars = q + 4 * l + 3 * n + 1; + let big_g = (r_count * (n - 1)) as f64; // G = R(n-1) + let m_use = 1.0 + big_g; // M_use = 1 + G + let n_f64 = n as f64; + + let mut constraints = Vec::new(); + + // Binary bounds for d_k: 0 <= d_k <= 1 + for k in 0..q { + constraints.push(LinearConstraint::le(vec![(d_idx(k), 1.0)], 1.0)); 
+ } + + // Bounds on g_j: 0 <= g_j <= G + for j in 0..l { + constraints.push(LinearConstraint::le(vec![(g_idx(j), 1.0)], big_g)); + } + + // Binary bounds: y_j, z_v, rho_v <= 1 + for j in 0..l { + constraints.push(LinearConstraint::le(vec![(y_idx(j), 1.0)], 1.0)); + } + for v in 0..n { + constraints.push(LinearConstraint::le(vec![(z_idx(v), 1.0)], 1.0)); + constraints.push(LinearConstraint::le(vec![(rho_idx(v), 1.0)], 1.0)); + } + + // The required multiplicity r_j(d): + // For original arcs (0 <= j < m): r_j = 1 (constant) + // For edge k forward (j = m + 2k): r_j = 1 - d_k + // For edge k reverse (j = m + 2k + 1): r_j = d_k + + // Balance constraints: + // sum_{j: tail_j = v} (r_j + g_j) - sum_{j: head_j = v} (r_j + g_j) = 0 for all v + for v in 0..n { + let mut terms = Vec::new(); + let mut constant = 0.0_f64; // constant part of r_j + + for j in 0..l { + let (tail, head) = avail_arcs[j]; + let sign = if tail == v && head == v { + 0.0 // self-loop contributes nothing + } else if tail == v { + 1.0 + } else if head == v { + -1.0 + } else { + continue; + }; + if sign == 0.0 { + continue; + } + + // g_j term + terms.push((g_idx(j), sign)); + + // r_j term + if j < m { + // Original arc: r_j = 1 + constant += sign; + } else { + let k = (j - m) / 2; + if (j - m).is_multiple_of(2) { + // Forward: r_j = 1 - d_k => constant += sign, d_k term += -sign + constant += sign; + terms.push((d_idx(k), -sign)); + } else { + // Reverse: r_j = d_k => d_k term += sign + terms.push((d_idx(k), sign)); + } + } + } + // terms = -constant => 0 + constraints.push(LinearConstraint::eq(terms, -constant)); + } + + // Use indicator: r_j + g_j <= M_use * y_j and y_j <= r_j + g_j + for j in 0..l { + if j < m { + // r_j = 1: (1 + g_j) <= M_use * y_j => g_j - M_use * y_j <= -1 + constraints.push(LinearConstraint::le( + vec![(g_idx(j), 1.0), (y_idx(j), -m_use)], + -1.0, + )); + // y_j <= 1 + g_j => y_j - g_j <= 1 + constraints.push(LinearConstraint::le( + vec![(y_idx(j), 1.0), (g_idx(j), -1.0)], + 
1.0, + )); + } else { + let k = (j - m) / 2; + if (j - m).is_multiple_of(2) { + // Forward: r_j = 1 - d_k + // (1 - d_k + g_j) <= M_use * y_j => g_j - d_k - M_use * y_j <= -1 + constraints.push(LinearConstraint::le( + vec![(g_idx(j), 1.0), (d_idx(k), -1.0), (y_idx(j), -m_use)], + -1.0, + )); + // y_j <= 1 - d_k + g_j => y_j + d_k - g_j <= 1 + constraints.push(LinearConstraint::le( + vec![(y_idx(j), 1.0), (d_idx(k), 1.0), (g_idx(j), -1.0)], + 1.0, + )); + } else { + // Reverse: r_j = d_k + // (d_k + g_j) <= M_use * y_j => d_k + g_j - M_use * y_j <= 0 + constraints.push(LinearConstraint::le( + vec![(d_idx(k), 1.0), (g_idx(j), 1.0), (y_idx(j), -m_use)], + 0.0, + )); + // y_j <= d_k + g_j => y_j - d_k - g_j <= 0 + constraints.push(LinearConstraint::le( + vec![(y_idx(j), 1.0), (d_idx(k), -1.0), (g_idx(j), -1.0)], + 0.0, + )); + } + } + } + + // Arc-vertex linking: y_j <= z_{tail_j} and y_j <= z_{head_j} + for j in 0..l { + let (tail, head) = avail_arcs[j]; + constraints.push(LinearConstraint::le( + vec![(y_idx(j), 1.0), (z_idx(tail), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(y_idx(j), 1.0), (z_idx(head), -1.0)], + 0.0, + )); + } + + // z_v <= sum_{j: tail_j=v or head_j=v} y_j + for v in 0..n { + let mut terms = vec![(z_idx(v), 1.0)]; + for j in 0..l { + let (tail, head) = avail_arcs[j]; + if tail == v || head == v { + terms.push((y_idx(j), -1.0)); + } + } + constraints.push(LinearConstraint::le(terms, 0.0)); + } + + // s = sum_v z_v + { + let mut terms = vec![(s_idx, -1.0)]; + for v in 0..n { + terms.push((z_idx(v), 1.0)); + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // Root selection: sum_v rho_v = 1, rho_v <= z_v + { + let terms: Vec<(usize, f64)> = (0..n).map(|v| (rho_idx(v), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + for v in 0..n { + constraints.push(LinearConstraint::le( + vec![(rho_idx(v), 1.0), (z_idx(v), -1.0)], + 0.0, + )); + } + + // Product linearization: b_v = s * rho_v + // 
b_v <= s, b_v <= n * rho_v, b_v >= s - n*(1 - rho_v), b_v >= 0 + for v in 0..n { + constraints.push(LinearConstraint::le( + vec![(b_idx(v), 1.0), (s_idx, -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(b_idx(v), 1.0), (rho_idx(v), -n_f64)], + 0.0, + )); + constraints.push(LinearConstraint::ge( + vec![(b_idx(v), 1.0), (s_idx, -1.0), (rho_idx(v), -n_f64)], + -n_f64, + )); + // b_v >= 0 is implied by ILP non-negativity + } + + // Flow bounds: 0 <= f_j, h_j <= (n-1) * y_j + let flow_big_m = (n as f64) - 1.0; + for j in 0..l { + constraints.push(LinearConstraint::le( + vec![(f_idx(j), 1.0), (y_idx(j), -flow_big_m)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(h_idx(j), 1.0), (y_idx(j), -flow_big_m)], + 0.0, + )); + } + + // Forward flow conservation: + // sum_{j: tail_j=v} f_j - sum_{j: head_j=v} f_j = b_v - z_v for all v + for v in 0..n { + let mut terms = Vec::new(); + for j in 0..l { + let (tail, head) = avail_arcs[j]; + if tail == v { + terms.push((f_idx(j), 1.0)); + } + if head == v { + terms.push((f_idx(j), -1.0)); + } + } + terms.push((b_idx(v), -1.0)); + terms.push((z_idx(v), 1.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // Reverse flow conservation: + // sum_{j: head_j=v} h_j - sum_{j: tail_j=v} h_j = b_v - z_v for all v + for v in 0..n { + let mut terms = Vec::new(); + for j in 0..l { + let (tail, head) = avail_arcs[j]; + if head == v { + terms.push((h_idx(j), 1.0)); + } + if tail == v { + terms.push((h_idx(j), -1.0)); + } + } + terms.push((b_idx(v), -1.0)); + terms.push((z_idx(v), 1.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // Length bound: sum_j l_j * (r_j + g_j) <= B + { + let mut terms = Vec::new(); + let mut constant = 0.0_f64; + + for j in 0..l { + let len_j = avail_lengths[j]; + // g_j term + terms.push((g_idx(j), len_j)); + // r_j contribution + if j < m { + constant += len_j; // r_j = 1 + } else { + let k = (j - m) / 2; + if (j - m).is_multiple_of(2) { + // r_j = 1 
- d_k + constant += len_j; + terms.push((d_idx(k), -len_j)); + } else { + // r_j = d_k + terms.push((d_idx(k), len_j)); + } + } + } + // sum ... <= B - constant + constraints.push(LinearConstraint::le(terms, *self.bound() as f64 - constant)); + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionMCPToILP { + target, + num_undirected_edges: q, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::rules::ReduceTo as _; + use crate::topology::MixedGraph; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "mixedchinesepostman_to_ilp", + build: || { + // Simple instance: 3 vertices, 1 arc, 1 edge + let source = MixedChinesePostman::new( + MixedGraph::new(3, vec![(0, 1)], vec![(1, 2), (2, 0)]), + vec![1], + vec![1, 1], + 4, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/mixedchinesepostman_ilp.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index e6afb99a..31b4f163 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -50,38 +50,74 @@ pub(crate) mod travelingsalesman_qubo; pub mod unitdiskmapping; #[cfg(feature = "ilp-solver")] -pub(crate) mod ilp_helpers; +pub(crate) mod acyclicpartition_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod balancedcompletebipartitesubgraph_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod bicliquecover_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod biconnectivityaugmentation_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod binpacking_ilp; 
#[cfg(feature = "ilp-solver")] +pub(crate) mod bmf_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod bottlenecktravelingsalesman_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod boundedcomponentspanningforest_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod capacityassignment_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod circuit_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod coloring_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod consecutiveblockminimization_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod consecutiveonesmatrixaugmentation_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod consecutiveonessubmatrix_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod consistencyofdatabasefrequencytables_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod directedtwocommodityintegralflow_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod disjointconnectingpaths_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod exactcoverby3sets_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod expectedretrievalcost_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod factoring_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod flowshopscheduling_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod graphpartitioning_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod hamiltonianpath_ilp; +#[cfg(feature = "ilp-solver")] mod ilp_bool_ilp_i32; #[cfg(feature = "ilp-solver")] +pub(crate) mod ilp_helpers; +#[cfg(feature = "ilp-solver")] pub(crate) mod ilp_qubo; #[cfg(feature = "ilp-solver")] pub(crate) mod integralflowbundles_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod integralflowhomologousarcs_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod integralflowwithmultipliers_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod isomorphicspanningtree_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod kclique_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod knapsack_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod lengthboundeddisjointpaths_ilp; 
+#[cfg(feature = "ilp-solver")] +pub(crate) mod longestcircuit_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod longestcommonsubsequence_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod longestpath_ilp; @@ -94,6 +130,8 @@ pub(crate) mod maximummatching_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod maximumsetpacking_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimumcutintoboundedsets_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimumdominatingset_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumfeedbackarcset_ilp; @@ -108,38 +146,78 @@ pub(crate) mod minimumsetcovering_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumsummulticenter_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimumtardinesssequencing_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minmaxmulticenter_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod mixedchinesepostman_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod multiplecopyfileallocation_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod multiprocessorscheduling_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod naesatisfiability_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod optimallineararrangement_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod paintshop_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod partiallyorderedknapsack_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod partitionintopathsoflength2_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod partitionintotriangles_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod pathconstrainednetworkflow_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod precedenceconstrainedscheduling_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod quadraticassignment_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod qubo_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod rectilinearpicturecompression_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod resourceconstrainedscheduling_ilp; +#[cfg(feature = "ilp-solver")] 
+pub(crate) mod rootedtreestorageassignment_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod ruralpostman_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod schedulingwithindividualdeadlines_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod sequencingtominimizemaximumcumulativecost_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod sequencingtominimizeweightedcompletiontime_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod sequencingtominimizeweightedtardiness_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod sequencingwithinintervals_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod sequencingwithreleasetimesanddeadlines_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod shortestcommonsupersequence_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod shortestweightconstrainedpath_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod sparsematrixcompression_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod stackercrane_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod steinertree_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod steinertreeingraphs_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod stringtostringcorrection_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod strongconnectivityaugmentation_ilp; +#[cfg(feature = "ilp-solver")] +pub(crate) mod subgraphisomorphism_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod sumofsquarespartition_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod timetabledesign_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod travelingsalesman_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod undirectedflowlowerbounds_ilp; @@ -188,53 +266,92 @@ pub(crate) fn canonical_rule_example_specs() -> Vec= p_u - p_v, z_{u,v} >= p_v - p_u +//! 
- Bound: sum z_{u,v} <= K + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::OptimalLinearArrangement; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; + +/// Result of reducing OptimalLinearArrangement to ILP. +/// +/// Variable layout (ILP, non-negative integers): +/// - `x_{v,p}` at index `v * n + p`, bounded to {0,1} +/// - `p_v` at index `n^2 + v`, integer position in {0, ..., n-1} +/// - `z_e` at index `n^2 + n + e`, non-negative integer for edge length +#[derive(Debug, Clone)] +pub struct ReductionOLAToILP { + target: ILP, + num_vertices: usize, +} + +impl ReductionResult for ReductionOLAToILP { + type Source = OptimalLinearArrangement; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: for each vertex v, output its position p (the unique p with x_{v,p} = 1). + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_vertices; + (0..n) + .map(|v| { + (0..n) + .find(|&p| target_solution[v * n + p] == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "num_vertices^2 + num_vertices + num_edges", + num_constraints = "2 * num_vertices + num_vertices^2 + num_vertices + num_vertices + 2 * num_edges + 1", + } +)] +impl ReduceTo> for OptimalLinearArrangement { + type Result = ReductionOLAToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let graph = self.graph(); + let edges = graph.edges(); + let m = edges.len(); + let bound = self.bound(); + + let num_x = n * n; + let num_vars = num_x + n + m; + + let x_idx = |v: usize, p: usize| -> usize { v * n + p }; + let p_idx = |v: usize| -> usize { num_x + v }; + let z_idx = |e: usize| -> usize { num_x + n + e }; + + let mut constraints = Vec::new(); + + // Assignment: each vertex in exactly one position + for v in 0..n { + let terms: Vec<(usize, f64)> = 
(0..n).map(|p| (x_idx(v, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Assignment: each position has exactly one vertex + for p in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|v| (x_idx(v, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Binary bounds for x variables (ILP) + for v in 0..n { + for p in 0..n { + constraints.push(LinearConstraint::le(vec![(x_idx(v, p), 1.0)], 1.0)); + } + } + + // Position variable linking: p_v = sum_p p * x_{v,p} + // Reformulated as: p_v - sum_p p * x_{v,p} = 0 + for v in 0..n { + let mut terms: Vec<(usize, f64)> = vec![(p_idx(v), 1.0)]; + for p in 0..n { + terms.push((x_idx(v, p), -(p as f64))); + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // Position bounds: 0 <= p_v <= n-1 + for v in 0..n { + constraints.push(LinearConstraint::le(vec![(p_idx(v), 1.0)], (n - 1) as f64)); + } + + // Absolute value: z_e >= |p_u - p_v| for each edge e = {u, v} + for (e, &(u, v)) in edges.iter().enumerate() { + // z_e >= p_u - p_v + constraints.push(LinearConstraint::ge( + vec![(z_idx(e), 1.0), (p_idx(u), -1.0), (p_idx(v), 1.0)], + 0.0, + )); + // z_e >= p_v - p_u + constraints.push(LinearConstraint::ge( + vec![(z_idx(e), 1.0), (p_idx(v), -1.0), (p_idx(u), 1.0)], + 0.0, + )); + } + + // Bound: sum z_e <= K + let bound_terms: Vec<(usize, f64)> = (0..m).map(|e| (z_idx(e), 1.0)).collect(); + constraints.push(LinearConstraint::le(bound_terms, bound as f64)); + + // Feasibility: no objective + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionOLAToILP { + target, + num_vertices: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "optimallineararrangement_to_ilp", + build: || { + // Path P4: 0-1-2-3, bound 3 (identity permutation achieves cost 3) + let source = + 
OptimalLinearArrangement::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), 3); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/optimallineararrangement_ilp.rs"] +mod tests; diff --git a/src/rules/paintshop_ilp.rs b/src/rules/paintshop_ilp.rs new file mode 100644 index 00000000..c43ea8dd --- /dev/null +++ b/src/rules/paintshop_ilp.rs @@ -0,0 +1,147 @@ +//! Reduction from PaintShop to ILP (Integer Linear Programming). +//! +//! Binary variable x_i per car (first-occurrence color), binary k_p per +//! sequence position (actual color), binary c_p per adjacent pair (switch +//! indicator). Minimize Σ c_p. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::PaintShop; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionPaintShopToILP { + target: ILP, + num_cars: usize, +} + +impl ReductionResult for ReductionPaintShopToILP { + type Source = PaintShop; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract first-occurrence color bits (x_i) from ILP solution. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_cars].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_cars + 2 * num_sequence", + num_constraints = "num_sequence + 2 * num_sequence", + } +)] +impl ReduceTo> for PaintShop { + type Result = ReductionPaintShopToILP; + + fn reduce_to(&self) -> Self::Result { + let nc = self.num_cars(); + let seq_len = self.sequence_len(); + + // Variable layout: + // x_i: car first-occurrence color, index i for i in 0..nc + // k_p: actual color at position p, index nc + p for p in 0..seq_len + // c_p: switch indicator at position p, index nc + seq_len + p + let k_offset = nc; + let c_offset = nc + seq_len; + let num_vars = nc + 2 * seq_len; + + let mut constraints = Vec::new(); + + // Determine car index and is_first for each position. + // With config all-zero: first occ gets color 0, second occ gets color 1. + let base = self.get_coloring(&vec![0; nc]); + + // For each car i, find its positions by flipping x_i. 
+ for i in 0..nc { + let mut config = vec![0; nc]; + config[i] = 1; + let flipped = self.get_coloring(&config); + + for p in 0..seq_len { + if flipped[p] != base[p] { + // Position p belongs to car i + if base[p] == 0 { + // First occurrence: k_p = x_i + constraints.push(LinearConstraint::eq( + vec![(k_offset + p, 1.0), (i, -1.0)], + 0.0, + )); + } else { + // Second occurrence: k_p = 1 - x_i => k_p + x_i = 1 + constraints.push(LinearConstraint::eq( + vec![(k_offset + p, 1.0), (i, 1.0)], + 1.0, + )); + } + } + } + } + + // Switch constraints: c_p >= |k_p - k_{p-1}| for p > 0 + for p in 1..seq_len { + // c_p >= k_p - k_{p-1} + constraints.push(LinearConstraint::ge( + vec![ + (c_offset + p, 1.0), + (k_offset + p, -1.0), + (k_offset + p - 1, 1.0), + ], + 0.0, + )); + // c_p >= k_{p-1} - k_p + constraints.push(LinearConstraint::ge( + vec![ + (c_offset + p, 1.0), + (k_offset + p - 1, -1.0), + (k_offset + p, 1.0), + ], + 0.0, + )); + } + + // Objective: minimize Σ c_p for p in 1..seq_len + let objective: Vec<(usize, f64)> = (1..seq_len).map(|p| (c_offset + p, 1.0)).collect(); + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + ReductionPaintShopToILP { + target, + num_cars: nc, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "paintshop_to_ilp", + build: || { + // Sequence: A, B, A, C, B, C => 3 cars + let source = PaintShop::new(vec!["A", "B", "A", "C", "B", "C"]); + let reduction: ReductionPaintShopToILP = ReduceTo::>::reduce_to(&source); + let target_config = { + let ilp_solver = crate::solvers::ILPSolver::new(); + ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable") + }; + let source_config = reduction.extract_solution(&target_config); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config, 
+ }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/paintshop_ilp.rs"] +mod tests; diff --git a/src/rules/pathconstrainednetworkflow_ilp.rs b/src/rules/pathconstrainednetworkflow_ilp.rs new file mode 100644 index 00000000..e0b1db43 --- /dev/null +++ b/src/rules/pathconstrainednetworkflow_ilp.rs @@ -0,0 +1,102 @@ +//! Reduction from PathConstrainedNetworkFlow to ILP. +//! +//! One integer variable per prescribed path. Arc capacity aggregation +//! across paths and total flow requirement. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::PathConstrainedNetworkFlow; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing PathConstrainedNetworkFlow to ILP. +#[derive(Debug, Clone)] +pub struct ReductionPCNFToILP { + target: ILP, +} + +impl ReductionResult for ReductionPCNFToILP { + type Source = PathConstrainedNetworkFlow; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_paths", + num_constraints = "num_arcs + 1", + } +)] +impl ReduceTo> for PathConstrainedNetworkFlow { + type Result = ReductionPCNFToILP; + + fn reduce_to(&self) -> Self::Result { + let num_paths = self.num_paths(); + let num_arcs = self.num_arcs(); + let mut constraints = Vec::new(); + + // Arc capacity: sum_{i : a in P_i} f_i <= c_a for all a + for arc_idx in 0..num_arcs { + let terms: Vec<(usize, f64)> = self + .paths() + .iter() + .enumerate() + .filter(|(_, path)| path.contains(&arc_idx)) + .map(|(path_idx, _)| (path_idx, 1.0)) + .collect(); + if !terms.is_empty() { + constraints.push(LinearConstraint::le( + terms, + self.capacities()[arc_idx] as f64, + )); + } + } + + // Total flow requirement: sum_i f_i >= R + let total_terms: Vec<(usize, f64)> = (0..num_paths).map(|i| (i, 1.0)).collect(); + 
constraints.push(LinearConstraint::ge(total_terms, self.requirement() as f64)); + + ReductionPCNFToILP { + target: ILP::new(num_paths, constraints, vec![], ObjectiveSense::Minimize), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::topology::DirectedGraph; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "pathconstrainednetworkflow_to_ilp", + build: || { + // Simple graph: s=0, t=2, arcs 0->1->2 and 0->2 + // Two paths: [0,1] (0->1->2) and [2] (0->2) + let source = PathConstrainedNetworkFlow::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1, 1, 1], + 0, + 2, + vec![vec![0, 1], vec![2]], + 2, + ); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![1, 1], + target_config: vec![1, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/pathconstrainednetworkflow_ilp.rs"] +mod tests; diff --git a/src/rules/quadraticassignment_ilp.rs b/src/rules/quadraticassignment_ilp.rs new file mode 100644 index 00000000..9a2c907a --- /dev/null +++ b/src/rules/quadraticassignment_ilp.rs @@ -0,0 +1,146 @@ +//! Reduction from QuadraticAssignment to ILP (Integer Linear Programming). +//! +//! Linearized assignment formulation: +//! - Binary x_{i,p}: facility i at location p +//! - Binary z_{(i,p),(j,q)}: product x_{i,p} * x_{j,q} for i != j +//! - Assignment: each facility to exactly one location, each location at most one facility +//! - McCormick linearization for z variables +//! - Objective: minimize sum_{i!=j} C[i][j] * D[p][q] * z_{(i,p),(j,q)} + +use crate::models::algebraic::QuadraticAssignment; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::reduction; +use crate::rules::ilp_helpers::{mccormick_product, one_hot_assignment_constraints}; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing QuadraticAssignment to ILP. 
+/// +/// Variable layout (all binary): +/// - `x_{i,p}` at index `i * m + p` for facility i, location p +/// - `z` variables for McCormick products, indexed sequentially after x +#[derive(Debug, Clone)] +pub struct ReductionQAPToILP { + target: ILP, + num_facilities: usize, + num_locations: usize, +} + +impl ReductionResult for ReductionQAPToILP { + type Source = QuadraticAssignment; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: for each facility i, output the unique location p with x_{i,p} = 1. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let loc = self.num_locations; + (0..self.num_facilities) + .map(|i| { + (0..loc) + .find(|&p| target_solution[i * loc + p] == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "num_facilities * num_locations + num_facilities^2 * num_locations^2", + num_constraints = "num_facilities + num_locations + 3 * num_facilities^2 * num_locations^2", + } +)] +impl ReduceTo> for QuadraticAssignment { + type Result = ReductionQAPToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_facilities(); + let loc = self.num_locations(); + let cost = self.cost_matrix(); + let dist = self.distance_matrix(); + + let num_x = n * loc; + + let x_idx = |i: usize, p: usize| -> usize { i * loc + p }; + + // Enumerate z-variable pairs: (i, p, j, q) for i != j + let mut z_pairs = Vec::new(); + for i in 0..n { + for j in 0..n { + if i == j { + continue; + } + for p in 0..loc { + for q in 0..loc { + z_pairs.push((i, p, j, q)); + } + } + } + } + + let num_z = z_pairs.len(); + let num_vars = num_x + num_z; + + let z_idx = |z_seq: usize| -> usize { num_x + z_seq }; + + let mut constraints = Vec::new(); + + // Assignment constraints + constraints.extend(one_hot_assignment_constraints(n, loc, 0)); + + // McCormick linearization for z variables + for (z_seq, &(i, p, j, q)) in z_pairs.iter().enumerate() { + 
constraints.extend(mccormick_product(z_idx(z_seq), x_idx(i, p), x_idx(j, q))); + } + + // Objective: minimize sum_{i!=j,p,q} C[i][j] * D[p][q] * z_{(i,p),(j,q)} + let mut objective = Vec::new(); + for (z_seq, &(i, p, j, q)) in z_pairs.iter().enumerate() { + let coeff = cost[i][j] as f64 * dist[p][q] as f64; + if coeff != 0.0 { + objective.push((z_idx(z_seq), coeff)); + } + } + + let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize); + + ReductionQAPToILP { + target, + num_facilities: n, + num_locations: loc, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "quadraticassignment_to_ilp", + build: || { + // 2x2 QAP: 2 facilities, 2 locations + let source = QuadraticAssignment::new( + vec![vec![0, 1], vec![1, 0]], + vec![vec![0, 2], vec![2, 0]], + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/quadraticassignment_ilp.rs"] +mod tests; diff --git a/src/rules/resourceconstrainedscheduling_ilp.rs b/src/rules/resourceconstrainedscheduling_ilp.rs new file mode 100644 index 00000000..28f6ffb3 --- /dev/null +++ b/src/rules/resourceconstrainedscheduling_ilp.rs @@ -0,0 +1,127 @@ +//! Reduction from ResourceConstrainedScheduling to ILP. +//! +//! Time-indexed binary formulation: x_{j,t} = 1 iff task j runs in slot t. +//! Each task in exactly one slot; processor capacity and resource bounds +//! enforced per time slot. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::ResourceConstrainedScheduling; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing ResourceConstrainedScheduling to ILP. +/// +/// Variable layout: x_{j,t} at index `j * D + t` +/// for j in 0..n, t in 0..D. +#[derive(Debug, Clone)] +pub struct ReductionRCSToILP { + target: ILP, + num_tasks: usize, + deadline: usize, +} + +impl ReductionResult for ReductionRCSToILP { + type Source = ResourceConstrainedScheduling; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: for each task j, find the unique slot t with x_{j,t} = 1. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let d = self.deadline; + (0..self.num_tasks) + .map(|j| { + (0..d) + .find(|&t| target_solution.get(j * d + t).copied().unwrap_or(0) == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction(overhead = { + num_vars = "num_tasks * deadline", + num_constraints = "num_tasks + deadline + num_resources * deadline", +})] +impl ReduceTo> for ResourceConstrainedScheduling { + type Result = ReductionRCSToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + let d = self.deadline() as usize; + let r = self.num_resources(); + let m = self.num_processors(); + let num_vars = n * d; + + let var = |j: usize, t: usize| -> usize { j * d + t }; + + let mut constraints = Vec::new(); + + // 1. Each task in exactly one slot: Σ_t x_{j,t} = 1 for all j + for j in 0..n { + let terms: Vec<(usize, f64)> = (0..d).map(|t| (var(j, t), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2. Processor capacity: Σ_j x_{j,t} <= m for each time slot t + for t in 0..d { + let terms: Vec<(usize, f64)> = (0..n).map(|j| (var(j, t), 1.0)).collect(); + constraints.push(LinearConstraint::le(terms, m as f64)); + } + + // 3. 
Resource bounds: Σ_j r_{j,q} * x_{j,t} <= B_q for all q, t + for q in 0..r { + for t in 0..d { + let terms: Vec<(usize, f64)> = (0..n) + .map(|j| (var(j, t), self.resource_requirements()[j][q] as f64)) + .collect(); + constraints.push(LinearConstraint::le( + terms, + self.resource_bounds()[q] as f64, + )); + } + } + + ReductionRCSToILP { + target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize), + num_tasks: n, + deadline: d, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "resourceconstrainedscheduling_to_ilp", + build: || { + // 6 tasks, 3 processors, 1 resource with bound 20, deadline 2 + let source = ResourceConstrainedScheduling::new( + 3, + vec![20], + vec![vec![6], vec![7], vec![7], vec![6], vec![8], vec![6]], + 2, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/resourceconstrainedscheduling_ilp.rs"] +mod tests; diff --git a/src/rules/rootedtreestorageassignment_ilp.rs b/src/rules/rootedtreestorageassignment_ilp.rs new file mode 100644 index 00000000..ee137d38 --- /dev/null +++ b/src/rules/rootedtreestorageassignment_ilp.rs @@ -0,0 +1,440 @@ +//! Reduction from RootedTreeStorageAssignment to ILP (Integer Linear Programming). +//! +//! Uses parent indicators p_{v,u}, depth variables d_v, ancestor indicators +//! a_{u,v}, transitive-closure helpers h_{u,v,w}, and per-subset gadgets +//! (top/bottom selectors, pair selectors, endpoint depths, extension costs). 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::set::RootedTreeStorageAssignment; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +// Index helpers + +fn idx_p(n: usize, v: usize, u: usize) -> usize { + v * n + u +} + +fn idx_d(n: usize, v: usize) -> usize { + n * n + v +} + +fn idx_a(n: usize, u: usize, v: usize) -> usize { + n * n + n + u * n + v +} + +fn idx_h(n: usize, u: usize, v: usize, w: usize) -> usize { + 2 * n * n + n + (u * n + v) * n + w +} + +fn idx_t(n: usize, r: usize, s: usize, u: usize) -> usize { + let _ = r; + n * n * n + 2 * n * n + n + s * n + u +} + +fn idx_b(n: usize, r: usize, s: usize, v: usize) -> usize { + n * n * n + 2 * n * n + n + r * n + s * n + v +} + +fn idx_m(n: usize, r: usize, s: usize, u: usize, v: usize) -> usize { + n * n * n + 2 * n * n + n + 2 * r * n + s * n * n + u * n + v +} + +fn idx_big_t(n: usize, r: usize, s: usize) -> usize { + n * n * n + 2 * n * n + n + 2 * r * n + r * n * n + s +} + +fn idx_big_b(n: usize, r: usize, s: usize) -> usize { + n * n * n + 2 * n * n + n + 2 * r * n + r * n * n + r + s +} + +fn idx_c(n: usize, r: usize, s: usize) -> usize { + n * n * n + 2 * n * n + n + 2 * r * n + r * n * n + 2 * r + s +} + +fn total_vars(n: usize, r: usize) -> usize { + n * n * n + 2 * n * n + n + r * (n * n + 2 * n + 3) +} + +#[derive(Debug, Clone)] +pub struct ReductionRTSAToILP { + target: ILP, + n: usize, +} + +impl ReductionResult for ReductionRTSAToILP { + type Source = RootedTreeStorageAssignment; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Decode parent array from one-hot parent indicators p_{v,u}. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.n; + (0..n) + .map(|v| { + (0..n) + .find(|&u| target_solution[idx_p(n, v, u)] == 1) + .unwrap_or(v) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "universe_size * universe_size * universe_size + 2 * universe_size * universe_size + universe_size + num_subsets * (universe_size * universe_size + 2 * universe_size + 3)", + num_constraints = "universe_size * universe_size * universe_size + universe_size * universe_size + universe_size * universe_size + num_subsets * universe_size * universe_size", + } +)] +impl ReduceTo> for RootedTreeStorageAssignment { + type Result = ReductionRTSAToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.universe_size(); + let subsets = self.subsets(); + let bound = self.bound(); + + // Nontrivial subsets (size >= 2) + let nontrivial: Vec = (0..subsets.len()) + .filter(|&k| subsets[k].len() >= 2) + .collect(); + let r = nontrivial.len(); + + if n == 0 { + return ReductionRTSAToILP { + target: ILP::new(0, vec![], vec![], ObjectiveSense::Minimize), + n, + }; + } + + let nv = total_vars(n, r); + let big_m = n as f64; + let big_m_depth = (n - 1) as f64; + + let mut constraints = Vec::new(); + + // === Rooted-tree constraints === + + // Σ_u p_{v,u} = 1 ∀ v + for v in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|u| (idx_p(n, v, u), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Σ_v p_{v,v} = 1 (exactly one root) + let root_terms: Vec<(usize, f64)> = (0..n).map(|v| (idx_p(n, v, v), 1.0)).collect(); + constraints.push(LinearConstraint::eq(root_terms, 1.0)); + + // p_{v,u} binary: upper bound p_{v,u} <= 1 + for v in 0..n { + for u in 0..n { + constraints.push(LinearConstraint::le(vec![(idx_p(n, v, u), 1.0)], 1.0)); + } + } + + // d_v <= (n-1)(1 - p_{v,v}) ∀ v (root has depth 0) + for v in 0..n { + constraints.push(LinearConstraint::le( + vec![(idx_d(n, v), 1.0), (idx_p(n, v, v), big_m_depth)], 
+ big_m_depth, + )); + } + + // d_v >= 0 ∀ v + for v in 0..n { + constraints.push(LinearConstraint::ge(vec![(idx_d(n, v), 1.0)], 0.0)); + } + + // d_v <= n-1 ∀ v + for v in 0..n { + constraints.push(LinearConstraint::le(vec![(idx_d(n, v), 1.0)], big_m_depth)); + } + + // For u != v: d_v - d_u >= 1 - n(1 - p_{v,u}) + // d_v - d_u <= 1 + n(1 - p_{v,u}) + for v in 0..n { + for u in 0..n { + if u != v { + // d_v - d_u + n*p_{v,u} >= 1 - n + n = 1 + // => d_v - d_u + n*p_{v,u} >= 1 - n*(1 - p_{v,u}) + // Rewrite: d_v - d_u + n*p_{v,u} >= 1 - n + n*p_{v,u} ... no. + // Original: d_v - d_u >= 1 - n(1 - p_{v,u}) + // => d_v - d_u + n - n*p_{v,u} >= 1 + // => d_v - d_u - n*p_{v,u} >= 1 - n + constraints.push(LinearConstraint::ge( + vec![ + (idx_d(n, v), 1.0), + (idx_d(n, u), -1.0), + (idx_p(n, v, u), -big_m), + ], + 1.0 - big_m, + )); + + // d_v - d_u <= 1 + n(1 - p_{v,u}) + // => d_v - d_u - n + n*p_{v,u} <= 1 + // => d_v - d_u + n*p_{v,u} <= 1 + n + constraints.push(LinearConstraint::le( + vec![ + (idx_d(n, v), 1.0), + (idx_d(n, u), -1.0), + (idx_p(n, v, u), big_m), + ], + 1.0 + big_m, + )); + } + } + } + + // === Ancestor relation === + + // a_{v,v} = 1 ∀ v + for v in 0..n { + constraints.push(LinearConstraint::eq(vec![(idx_a(n, v, v), 1.0)], 1.0)); + } + + // h_{u,v,v} = 0 ∀ u,v + for u in 0..n { + for v in 0..n { + constraints.push(LinearConstraint::eq(vec![(idx_h(n, u, v, v), 1.0)], 0.0)); + } + } + + // For u != v: a_{u,v} = Σ_w h_{u,v,w} + for u in 0..n { + for v in 0..n { + if u != v { + let mut terms = vec![(idx_a(n, u, v), -1.0)]; + for w in 0..n { + terms.push((idx_h(n, u, v, w), 1.0)); + } + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + } + } + + // h_{u,v,w} <= p_{v,w} ∀ u,v,w with w != v + // h_{u,v,w} <= a_{u,w} ∀ u,v,w with w != v + // h_{u,v,w} >= p_{v,w} + a_{u,w} - 1 ∀ u,v,w with w != v + for u in 0..n { + for v in 0..n { + for w in 0..n { + if w != v { + constraints.push(LinearConstraint::le( + vec![(idx_h(n, u, v, w), 1.0), (idx_p(n, v, w), 
-1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(idx_h(n, u, v, w), 1.0), (idx_a(n, u, w), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::ge( + vec![ + (idx_h(n, u, v, w), 1.0), + (idx_p(n, v, w), -1.0), + (idx_a(n, u, w), -1.0), + ], + -1.0, + )); + } + } + } + } + + // Binary bounds for a, h + for u in 0..n { + for v in 0..n { + constraints.push(LinearConstraint::le(vec![(idx_a(n, u, v), 1.0)], 1.0)); + for w in 0..n { + constraints.push(LinearConstraint::le(vec![(idx_h(n, u, v, w), 1.0)], 1.0)); + } + } + } + + // === Subset gadgets === + for (s, &orig_k) in nontrivial.iter().enumerate() { + let subset = &subsets[orig_k]; + let subset_size = subset.len(); + + // Top selectors: Σ_{u ∈ S} t_{s,u} = 1, t_{s,u} = 0 for u ∉ S + let top_terms: Vec<(usize, f64)> = + subset.iter().map(|&u| (idx_t(n, r, s, u), 1.0)).collect(); + constraints.push(LinearConstraint::eq(top_terms, 1.0)); + for u in 0..n { + if !subset.contains(&u) { + constraints.push(LinearConstraint::eq(vec![(idx_t(n, r, s, u), 1.0)], 0.0)); + } + // Binary bound + constraints.push(LinearConstraint::le(vec![(idx_t(n, r, s, u), 1.0)], 1.0)); + } + + // Bottom selectors: Σ_{v ∈ S} b_{s,v} = 1, b_{s,v} = 0 for v ∉ S + let bot_terms: Vec<(usize, f64)> = + subset.iter().map(|&v| (idx_b(n, r, s, v), 1.0)).collect(); + constraints.push(LinearConstraint::eq(bot_terms, 1.0)); + for v in 0..n { + if !subset.contains(&v) { + constraints.push(LinearConstraint::eq(vec![(idx_b(n, r, s, v), 1.0)], 0.0)); + } + constraints.push(LinearConstraint::le(vec![(idx_b(n, r, s, v), 1.0)], 1.0)); + } + + // Pair selectors (McCormick): m_{s,u,v} = t_{s,u} * b_{s,v} + for u in 0..n { + for v in 0..n { + constraints.push(LinearConstraint::le( + vec![(idx_m(n, r, s, u, v), 1.0), (idx_t(n, r, s, u), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(idx_m(n, r, s, u, v), 1.0), (idx_b(n, r, s, v), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::ge( + vec![ + (idx_m(n, r, s, u, 
v), 1.0), + (idx_t(n, r, s, u), -1.0), + (idx_b(n, r, s, v), -1.0), + ], + -1.0, + )); + constraints.push(LinearConstraint::le(vec![(idx_m(n, r, s, u, v), 1.0)], 1.0)); + } + } + + // Path condition: m_{s,u,v} <= a_{u,v} (top is ancestor of bottom) + for u in 0..n { + for v in 0..n { + constraints.push(LinearConstraint::le( + vec![(idx_m(n, r, s, u, v), 1.0), (idx_a(n, u, v), -1.0)], + 0.0, + )); + } + } + + // Every subset element w lies on the chain: + // m_{s,u,v} <= a_{u,w} and m_{s,u,v} <= a_{w,v} ∀ w ∈ S, u, v + for &w in subset { + for u in 0..n { + for v in 0..n { + constraints.push(LinearConstraint::le( + vec![(idx_m(n, r, s, u, v), 1.0), (idx_a(n, u, w), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(idx_m(n, r, s, u, v), 1.0), (idx_a(n, w, v), -1.0)], + 0.0, + )); + } + } + } + + // Endpoint depths: T_s, B_s + // T_s - d_u <= (n-1)(1 - t_{s,u}) and d_u - T_s <= (n-1)(1 - t_{s,u}) + for &u in subset { + constraints.push(LinearConstraint::le( + vec![ + (idx_big_t(n, r, s), 1.0), + (idx_d(n, u), -1.0), + (idx_t(n, r, s, u), big_m_depth), + ], + big_m_depth, + )); + constraints.push(LinearConstraint::le( + vec![ + (idx_d(n, u), 1.0), + (idx_big_t(n, r, s), -1.0), + (idx_t(n, r, s, u), big_m_depth), + ], + big_m_depth, + )); + } + // B_s - d_v <= (n-1)(1 - b_{s,v}) and d_v - B_s <= (n-1)(1 - b_{s,v}) + for &v in subset { + constraints.push(LinearConstraint::le( + vec![ + (idx_big_b(n, r, s), 1.0), + (idx_d(n, v), -1.0), + (idx_b(n, r, s, v), big_m_depth), + ], + big_m_depth, + )); + constraints.push(LinearConstraint::le( + vec![ + (idx_d(n, v), 1.0), + (idx_big_b(n, r, s), -1.0), + (idx_b(n, r, s, v), big_m_depth), + ], + big_m_depth, + )); + } + + // Depth bounds for T_s, B_s + constraints.push(LinearConstraint::ge(vec![(idx_big_t(n, r, s), 1.0)], 0.0)); + constraints.push(LinearConstraint::le( + vec![(idx_big_t(n, r, s), 1.0)], + big_m_depth, + )); + constraints.push(LinearConstraint::ge(vec![(idx_big_b(n, r, s), 1.0)], 0.0)); + 
constraints.push(LinearConstraint::le( + vec![(idx_big_b(n, r, s), 1.0)], + big_m_depth, + )); + + // Extension cost: c_s = B_s - T_s + 1 - |S| + // => c_s - B_s + T_s = 1 - |S| + constraints.push(LinearConstraint::eq( + vec![ + (idx_c(n, r, s), 1.0), + (idx_big_b(n, r, s), -1.0), + (idx_big_t(n, r, s), 1.0), + ], + 1.0 - subset_size as f64, + )); + + // c_s >= 0 + constraints.push(LinearConstraint::ge(vec![(idx_c(n, r, s), 1.0)], 0.0)); + } + + // Total cost bound: Σ c_s <= K + if r > 0 { + let cost_terms: Vec<(usize, f64)> = (0..r).map(|s| (idx_c(n, r, s), 1.0)).collect(); + constraints.push(LinearConstraint::le(cost_terms, bound as f64)); + } + + let target = ILP::new(nv, constraints, vec![], ObjectiveSense::Minimize); + ReductionRTSAToILP { target, n } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: "rootedtreestorageassignment_to_ilp", + build: || { + let source = RootedTreeStorageAssignment::new(3, vec![vec![0, 1], vec![1, 2]], 1); + let reduction: ReductionRTSAToILP = ReduceTo::>::reduce_to(&source); + let target_config = { + let ilp_solver = crate::solvers::ILPSolver::new(); + ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable") + }; + let source_config = reduction.extract_solution(&target_config); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/rootedtreestorageassignment_ilp.rs"] +mod tests; diff --git a/src/rules/ruralpostman_ilp.rs b/src/rules/ruralpostman_ilp.rs new file mode 100644 index 00000000..3fc2a30a --- /dev/null +++ b/src/rules/ruralpostman_ilp.rs @@ -0,0 +1,251 @@ +//! Reduction from RuralPostman to ILP. +//! +//! Uses traversal multiplicity variables, parity variables, activation and +//! 
connectivity flow constraints to encode an Eulerian connected subgraph +//! covering all required edges within the length bound. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::RuralPostman; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; +use crate::types::WeightElement; + +/// Result of reducing RuralPostman to ILP. +#[derive(Debug, Clone)] +pub struct ReductionRPToILP { + target: ILP, + num_edges: usize, +} + +impl ReductionResult for ReductionRPToILP { + type Source = RuralPostman; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // Output the traversal multiplicities t_e + target_solution[..self.num_edges].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_edges + num_vertices + num_edges + num_vertices + 2 * num_edges", + num_constraints = "2 * num_edges + num_required_edges + num_vertices + 2 * num_edges + num_vertices + 2 * num_edges + num_vertices + 1", + } +)] +impl ReduceTo> for RuralPostman { + type Result = ReductionRPToILP; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_edges(); + let n = self.num_vertices(); + let edges = self.graph().edges(); + + // If E' is empty, the empty circuit satisfies when B >= 0 + if self.required_edges().is_empty() { + return ReductionRPToILP { + target: ILP::new(0, vec![], vec![], ObjectiveSense::Minimize), + num_edges: 0, + }; + } + + // Pick root vertex: first endpoint of first required edge + let root = edges[self.required_edges()[0]].0; + + // Variable layout: + // t_e: index e (0..m) -- traversal multiplicity {0,1,2} + // q_v: index m + v -- parity variable (degree/2) + // y_e: index m + n + e -- binary edge activation + // z_v: index m + n + m + v -- binary vertex activity + // f_{e,0}: index m + n + m + n + 2*e -- flow u->v + // f_{e,1}: index m + n + m + n + 2*e + 
1 -- flow v->u + let t_idx = |e: usize| e; + let q_idx = |v: usize| m + v; + let y_idx = |e: usize| m + n + e; + let z_idx = |v: usize| m + n + m + v; + let f_idx = |e: usize, dir: usize| m + n + m + n + 2 * e + dir; + + let num_vars = m + n + m + n + 2 * m; + let mut constraints = Vec::new(); + + // y_e <= t_e and t_e <= 2*y_e for each edge + for e in 0..m { + constraints.push(LinearConstraint::le( + vec![(y_idx(e), 1.0), (t_idx(e), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(t_idx(e), 1.0), (y_idx(e), -2.0)], + 0.0, + )); + } + + // t_e >= 1 for required edges + for &req_idx in self.required_edges() { + constraints.push(LinearConstraint::ge(vec![(t_idx(req_idx), 1.0)], 1.0)); + } + + // Even degree: sum_{e : v in e} t_e = 2 * q_v for all v + for v in 0..n { + let mut terms = Vec::new(); + for (e, &(u, w)) in edges.iter().enumerate() { + if u == v || w == v { + terms.push((t_idx(e), 1.0)); + } + } + terms.push((q_idx(v), -2.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // y_e <= z_u and y_e <= z_v for each edge e = {u,v} + for (e, &(u, v)) in edges.iter().enumerate() { + constraints.push(LinearConstraint::le( + vec![(y_idx(e), 1.0), (z_idx(u), -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(y_idx(e), 1.0), (z_idx(v), -1.0)], + 0.0, + )); + } + + // z_v <= sum_{e : v in e} y_e for all v + for v in 0..n { + let mut terms = vec![(z_idx(v), 1.0)]; + for (e, &(u, w)) in edges.iter().enumerate() { + if u == v || w == v { + terms.push((y_idx(e), -1.0)); + } + } + constraints.push(LinearConstraint::le(terms, 0.0)); + } + + // Flow capacity: f_{u,v} <= (n-1)*y_e and f_{v,u} <= (n-1)*y_e + let big_m = (n - 1) as f64; + for e in 0..m { + constraints.push(LinearConstraint::le( + vec![(f_idx(e, 0), 1.0), (y_idx(e), -big_m)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(f_idx(e, 1), 1.0), (y_idx(e), -big_m)], + 0.0, + )); + } + + // Connectivity flow from root: + // Root: sum_{w: {r,w} in E} 
f_{r,w} - sum_{u: {u,r} in E} f_{u,r} = sum_v z_v - 1 + // For non-root v: sum_{u: {u,v} in E} f_{u,v} - sum_{w: {v,w} in E} f_{v,w} = z_v + + // Root conservation: outflow - inflow = sum_v z_v - 1 + { + let mut terms = Vec::new(); + for (e, &(u, v)) in edges.iter().enumerate() { + if u == root { + terms.push((f_idx(e, 0), 1.0)); // outgoing from root via dir 0 + terms.push((f_idx(e, 1), -1.0)); // incoming to root via dir 1 + } + if v == root { + terms.push((f_idx(e, 1), 1.0)); // outgoing from root via dir 1 + terms.push((f_idx(e, 0), -1.0)); // incoming to root via dir 0 + } + } + // rhs = sum_v z_v - 1, move z_v to left side + for v in 0..n { + terms.push((z_idx(v), -1.0)); + } + constraints.push(LinearConstraint::eq(terms, -1.0)); + } + + // Non-root vertices: inflow - outflow = z_v + // The paper says: sum_{u: {u,v}} f_{u,v} - sum_{w: {v,w}} f_{v,w} = z_v + // This means: inflow - outflow = z_v (each non-root active vertex absorbs 1 unit) + for v in 0..n { + if v == root { + continue; + } + let mut terms = Vec::new(); + for (e, &(u, w)) in edges.iter().enumerate() { + if u == v { + // Edge e = {v, w}: dir 0 is v->w (outgoing), dir 1 is w->v (incoming) + terms.push((f_idx(e, 0), -1.0)); // outgoing + terms.push((f_idx(e, 1), 1.0)); // incoming + } + if w == v { + // Edge e = {u, v}: dir 0 is u->v (incoming), dir 1 is v->u (outgoing) + terms.push((f_idx(e, 0), 1.0)); // incoming + terms.push((f_idx(e, 1), -1.0)); // outgoing + } + } + terms.push((z_idx(v), -1.0)); + constraints.push(LinearConstraint::eq(terms, 0.0)); + } + + // Length bound: sum_e l_e * t_e <= B + let edge_lengths = self.edge_lengths(); + let length_terms: Vec<(usize, f64)> = (0..m) + .map(|e| (t_idx(e), edge_lengths[e].to_sum() as f64)) + .collect(); + constraints.push(LinearConstraint::le(length_terms, *self.bound() as f64)); + + // Upper bound on t_e: t_e <= 2 + for e in 0..m { + constraints.push(LinearConstraint::le(vec![(t_idx(e), 1.0)], 2.0)); + } + + // Upper bounds on binary variables: 
y_e <= 1, z_v <= 1 + for e in 0..m { + constraints.push(LinearConstraint::le(vec![(y_idx(e), 1.0)], 1.0)); + } + for v in 0..n { + constraints.push(LinearConstraint::le(vec![(z_idx(v), 1.0)], 1.0)); + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionRPToILP { + target, + num_edges: m, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::rules::ReduceTo as _; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ruralpostman_to_ilp", + build: || { + // Triangle: 3 vertices, 3 edges, require edge 0, bound 3 + let source = RuralPostman::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1, 1, 1], + vec![0], + 3, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ruralpostman_ilp.rs"] +mod tests; diff --git a/src/rules/sequencingtominimizemaximumcumulativecost_ilp.rs b/src/rules/sequencingtominimizemaximumcumulativecost_ilp.rs new file mode 100644 index 00000000..65165b6a --- /dev/null +++ b/src/rules/sequencingtominimizemaximumcumulativecost_ilp.rs @@ -0,0 +1,125 @@ +//! Reduction from SequencingToMinimizeMaximumCumulativeCost to ILP. +//! +//! Position-assignment ILP: binary x_{j,p} placing task j in position p. +//! Permutation constraints, precedence constraints, and prefix cumulative-cost +//! bounds at every position. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::SequencingToMinimizeMaximumCumulativeCost; +use crate::reduction; +use crate::rules::ilp_helpers::{one_hot_decode, permutation_to_lehmer}; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing SequencingToMinimizeMaximumCumulativeCost to ILP. +/// +/// Variable layout: +/// - x_{j,p} for j in 0..n, p in 0..n: index `j*n + p` +/// +/// Total: n^2 variables. +#[derive(Debug, Clone)] +pub struct ReductionSTMMCCToILP { + target: ILP, + num_tasks: usize, +} + +impl ReductionResult for ReductionSTMMCCToILP { + type Source = SequencingToMinimizeMaximumCumulativeCost; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: decode position assignment → permutation → Lehmer code. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_tasks; + let schedule = one_hot_decode(target_solution, n, n, 0); + permutation_to_lehmer(&schedule) + } +} + +#[reduction(overhead = { + num_vars = "num_tasks * num_tasks", + num_constraints = "2 * num_tasks + num_precedences + num_tasks", +})] +impl ReduceTo> for SequencingToMinimizeMaximumCumulativeCost { + type Result = ReductionSTMMCCToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + let num_vars = n * n; + + let x_var = |j: usize, p: usize| -> usize { j * n + p }; + + let mut constraints = Vec::new(); + + // 1. Each task assigned to exactly one position: Σ_p x_{j,p} = 1 for all j + for j in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|p| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 2. Each position has exactly one task: Σ_j x_{j,p} = 1 for all p + for p in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|j| (x_var(j, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // 3. 
Precedence: Σ_p p*x_{i,p} + 1 <= Σ_p p*x_{j,p} for each (i,j) + for &(i, j) in self.precedences() { + let mut terms: Vec<(usize, f64)> = Vec::new(); + for p in 0..n { + terms.push((x_var(j, p), p as f64)); + terms.push((x_var(i, p), -(p as f64))); + } + constraints.push(LinearConstraint::ge(terms, 1.0)); + } + + // 4. Prefix cumulative cost: Σ_j Σ_{p in 0..=q} c_j * x_{j,p} <= K for all q + let costs = self.costs(); + let bound = self.bound(); + for q in 0..n { + let mut terms: Vec<(usize, f64)> = Vec::new(); + for (j, &c_j) in costs.iter().enumerate() { + for p in 0..=q { + terms.push((x_var(j, p), c_j as f64)); + } + } + constraints.push(LinearConstraint::le(terms, bound as f64)); + } + + ReductionSTMMCCToILP { + target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize), + num_tasks: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "sequencingtominimizemaximumcumulativecost_to_ilp", + build: || { + let source = + SequencingToMinimizeMaximumCumulativeCost::new(vec![2, -1, 3, -2], vec![(0, 2)], 4); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/sequencingtominimizemaximumcumulativecost_ilp.rs"] +mod tests; diff --git a/src/rules/sequencingtominimizeweightedtardiness_ilp.rs b/src/rules/sequencingtominimizeweightedtardiness_ilp.rs new file mode 100644 index 00000000..38b655ed --- /dev/null +++ b/src/rules/sequencingtominimizeweightedtardiness_ilp.rs @@ -0,0 +1,185 @@ +//! 
Reduction from SequencingToMinimizeWeightedTardiness to ILP. +//! +//! Pairwise order variables y_{i,j}, integer completion times C_j, +//! and nonnegative tardiness variables T_j. Big-M disjunctive constraints +//! force a single-machine order; the weighted tardiness sum is bounded by K. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::SequencingToMinimizeWeightedTardiness; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing SequencingToMinimizeWeightedTardiness to ILP. +/// +/// Variable layout: +/// - `y_{i,j}` for i < j: pairwise order bits (n*(n-1)/2 vars) +/// - `C_j` for j in 0..n: completion times (n vars) +/// - `T_j` for j in 0..n: tardiness (n vars) +/// +/// Total: n*(n-1)/2 + 2*n variables. +#[derive(Debug, Clone)] +pub struct ReductionSTMWTToILP { + target: ILP, + num_tasks: usize, + num_order_vars: usize, +} + +impl ReductionSTMWTToILP { + fn encode_schedule_as_lehmer(schedule: &[usize]) -> Vec { + let mut available: Vec = (0..schedule.len()).collect(); + let mut config = Vec::with_capacity(schedule.len()); + for &task in schedule { + let digit = available + .iter() + .position(|&c| c == task) + .expect("schedule must be a permutation"); + config.push(digit); + available.remove(digit); + } + config + } +} + +impl ReductionResult for ReductionSTMWTToILP { + type Source = SequencingToMinimizeWeightedTardiness; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: sort jobs by completion time C_j, convert to Lehmer code. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_tasks; + let c_offset = self.num_order_vars; + let mut jobs: Vec = (0..n).collect(); + jobs.sort_by_key(|&j| (target_solution.get(c_offset + j).copied().unwrap_or(0), j)); + Self::encode_schedule_as_lehmer(&jobs) + } +} + +#[reduction(overhead = { + num_vars = "num_tasks * (num_tasks - 1) / 2 + 2 * num_tasks", + num_constraints = "num_tasks * (num_tasks - 1) / 2 + num_tasks + num_tasks * (num_tasks - 1) + 2 * num_tasks + 1", +})] +impl ReduceTo> for SequencingToMinimizeWeightedTardiness { + type Result = ReductionSTMWTToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_tasks(); + let num_order_vars = n * n.saturating_sub(1) / 2; + let num_vars = num_order_vars + 2 * n; + + let order_var = |i: usize, j: usize| -> usize { + debug_assert!(i < j); + i * (2 * n - i - 1) / 2 + (j - i - 1) + }; + let c_var = |j: usize| -> usize { num_order_vars + j }; + let t_var = |j: usize| -> usize { num_order_vars + n + j }; + + let lengths = self.lengths(); + let deadlines = self.deadlines(); + let weights = self.weights(); + let bound = self.bound(); + + // M = sum of all lengths (valid schedule-horizon bound) + let big_m: f64 = lengths.iter().sum::() as f64; + + let mut constraints = Vec::new(); + + // 1. y_{i,j} in {0,1}: 0 <= y_{i,j} <= 1 + for i in 0..n { + for j in (i + 1)..n { + constraints.push(LinearConstraint::le(vec![(order_var(i, j), 1.0)], 1.0)); + constraints.push(LinearConstraint::ge(vec![(order_var(i, j), 1.0)], 0.0)); + } + } + + // 2. C_j >= l_j for all j + for (j, &l_j) in lengths.iter().enumerate() { + constraints.push(LinearConstraint::ge(vec![(c_var(j), 1.0)], l_j as f64)); + } + + // 3. Disjunctive: C_j >= C_i + l_j - M*(1 - y_{i,j}) for i != j + for i in 0..n { + for (j, &l_j) in lengths.iter().enumerate() { + if i == j { + continue; + } + if i < j { + // y_{i,j} is the stored variable. 
+ // C_j >= C_i + l_j - M*(1 - y_{i,j}) + // => C_j - C_i - M*y_{i,j} >= l_j - M + constraints.push(LinearConstraint::ge( + vec![(c_var(j), 1.0), (c_var(i), -1.0), (order_var(i, j), -big_m)], + l_j as f64 - big_m, + )); + } else { + // i > j: y_{j,i} is stored, y_{i,j} = 1 - y_{j,i} + // C_j >= C_i + l_j - M*y_{j,i} + // C_j - C_i + M*y_{j,i} >= l_j + constraints.push(LinearConstraint::ge( + vec![(c_var(j), 1.0), (c_var(i), -1.0), (order_var(j, i), big_m)], + l_j as f64, + )); + } + } + } + + // 4. T_j >= C_j - d_j for all j + for (j, &d_j) in deadlines.iter().enumerate() { + constraints.push(LinearConstraint::ge( + vec![(t_var(j), 1.0), (c_var(j), -1.0)], + -(d_j as f64), + )); + } + + // 5. T_j >= 0 for all j + for j in 0..n { + constraints.push(LinearConstraint::ge(vec![(t_var(j), 1.0)], 0.0)); + } + + // 6. Σ_j w_j * T_j <= K + let terms: Vec<(usize, f64)> = (0..n).map(|j| (t_var(j), weights[j] as f64)).collect(); + constraints.push(LinearConstraint::le(terms, bound as f64)); + + ReductionSTMWTToILP { + target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize), + num_tasks: n, + num_order_vars, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "sequencingtominimizeweightedtardiness_to_ilp", + build: || { + let source = SequencingToMinimizeWeightedTardiness::new( + vec![3, 4, 2], + vec![2, 3, 1], + vec![5, 8, 4], + 10, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = 
"../unit_tests/rules/sequencingtominimizeweightedtardiness_ilp.rs"]
mod tests;

//! Reduction from SequencingWithReleaseTimesAndDeadlines to ILP.
//!
//! Time-indexed formulation: binary x_{j,t} = 1 iff task j starts at time t.
//! Each task starts within its admissible window [r_j, d_j - p_j].
//! No two tasks may overlap on the single machine.

use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP};
use crate::models::misc::SequencingWithReleaseTimesAndDeadlines;
use crate::reduction;
use crate::rules::traits::{ReduceTo, ReductionResult};

/// Result of reducing SequencingWithReleaseTimesAndDeadlines to ILP.
///
/// Variable layout: x_{j,t} at index `j * T + t` for j in 0..n, t in 0..T,
/// where T = time_horizon (max deadline).
#[derive(Debug, Clone)]
pub struct ReductionSWRTDToILP {
    target: ILP,
    num_tasks: usize,
    time_horizon: usize,
}

impl ReductionSWRTDToILP {
    /// Convert an explicit schedule (permutation of task ids) into the
    /// Lehmer-code configuration used by the source problem.
    fn encode_schedule_as_lehmer(schedule: &[usize]) -> Vec<usize> {
        let mut available: Vec<usize> = (0..schedule.len()).collect();
        let mut config = Vec::with_capacity(schedule.len());
        for &task in schedule {
            let digit = available
                .iter()
                .position(|&c| c == task)
                .expect("schedule must be a permutation");
            config.push(digit);
            available.remove(digit);
        }
        config
    }
}

impl ReductionResult for ReductionSWRTDToILP {
    type Source = SequencingWithReleaseTimesAndDeadlines;
    type Target = ILP;

    fn target_problem(&self) -> &ILP {
        &self.target
    }

    /// Extract: read each task's start time, sort tasks by start time,
    /// encode as Lehmer code.
    fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
        let n = self.num_tasks;
        let horizon = self.time_horizon;
        // For each task, find the start time
        let mut start_times: Vec<(usize, usize)> = (0..n)
            .map(|j| {
                let start = (0..horizon)
                    .find(|&t| target_solution.get(j * horizon + t).copied().unwrap_or(0) == 1)
                    .unwrap_or(0);
                (j, start)
            })
            .collect();
        // Sort by start time (break ties by task index)
        start_times.sort_by_key(|&(j, t)| (t, j));
        let schedule: Vec<usize> = start_times.iter().map(|&(j, _)| j).collect();
        Self::encode_schedule_as_lehmer(&schedule)
    }
}

// NOTE(fix): num_constraints previously read "num_tasks + time_horizon" and
// omitted the per-task zero-fix constraints emitted below (one per (j, t)
// outside the admissible window, i.e. up to num_tasks * time_horizon).
#[reduction(overhead = {
    num_vars = "num_tasks * time_horizon",
    num_constraints = "num_tasks + num_tasks * time_horizon + time_horizon",
})]
impl ReduceTo<ILP> for SequencingWithReleaseTimesAndDeadlines {
    type Result = ReductionSWRTDToILP;

    fn reduce_to(&self) -> Self::Result {
        let n = self.num_tasks();
        let horizon = self.time_horizon() as usize;
        let num_vars = n * horizon;

        let var = |j: usize, t: usize| -> usize { j * horizon + t };

        let lengths = self.lengths();
        let release_times = self.release_times();
        let deadlines = self.deadlines();

        let mut constraints = Vec::new();

        // 1. Each task starts exactly once within its admissible window:
        //    Σ_{t=r_j}^{d_j-p_j} x_{j,t} = 1 for all j.
        //    Variables outside the window are additionally pinned to zero
        //    with explicit constraints.
        for j in 0..n {
            let r = release_times[j] as usize;
            let last_start = if deadlines[j] >= lengths[j] {
                (deadlines[j] - lengths[j]) as usize
            } else {
                0
            };
            // An empty window yields eq([], 1.0), which is correctly infeasible.
            let terms: Vec<(usize, f64)> = (r..=last_start)
                .filter(|&t| t < horizon)
                .map(|t| (var(j, t), 1.0))
                .collect();
            constraints.push(LinearConstraint::eq(terms, 1.0));

            // Zero-fix variables outside the admissible window
            for t in 0..horizon {
                if t < r || t > last_start {
                    constraints.push(LinearConstraint::eq(vec![(var(j, t), 1.0)], 0.0));
                }
            }
        }

        // 2. No overlap: for each time instant tau in 0..horizon,
        //    Σ_{j,t : t <= tau < t + p_j} x_{j,t} <= 1
        for tau in 0..horizon {
            let mut terms: Vec<(usize, f64)> = Vec::new();
            for (j, &len_j) in lengths.iter().enumerate() {
                let p = len_j as usize;
                // Task j started at time t overlaps tau iff t <= tau < t + p_j
                // i.e., tau - p_j + 1 <= t <= tau, where t >= 0
                let t_min = (tau + 1).saturating_sub(p);
                let t_max = tau;
                for t in t_min..=t_max {
                    if t < horizon {
                        terms.push((var(j, t), 1.0));
                    }
                }
            }
            constraints.push(LinearConstraint::le(terms, 1.0));
        }

        ReductionSWRTDToILP {
            target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize),
            num_tasks: n,
            time_horizon: horizon,
        }
    }
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_rule_example_specs() -> Vec<crate::example_db::specs::RuleExampleSpec> {
    use crate::export::SolutionPair;

    vec![crate::example_db::specs::RuleExampleSpec {
        id: "sequencingwithreleasetimesanddeadlines_to_ilp",
        build: || {
            let source = SequencingWithReleaseTimesAndDeadlines::new(
                vec![1, 2, 1],
                vec![0, 0, 2],
                vec![3, 3, 4],
            );
            let reduction = ReduceTo::<ILP>::reduce_to(&source);
            let ilp_solution = crate::solvers::ILPSolver::new()
                .solve(reduction.target_problem())
                .expect("canonical example must be solvable");
            let source_config = reduction.extract_solution(&ilp_solution);
            crate::example_db::specs::rule_example_with_witness::<_, ILP>(
                source,
                SolutionPair {
                    source_config,
                    target_config: ilp_solution,
                },
            )
        },
    }]
}

#[cfg(test)]
#[path = "../unit_tests/rules/sequencingwithreleasetimesanddeadlines_ilp.rs"]
mod tests;

//! Reduction from ShortestCommonSupersequence to ILP (Integer Linear Programming).
//!
//! One-hot symbol variables x_{p,a} for each position p and symbol a, plus
//! matching variables m_{s,j,p} indicating that the j-th character of string s
//! is matched to position p. Monotonicity forces strictly increasing match
//! positions per string.

use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP};
use crate::models::misc::ShortestCommonSupersequence;
use crate::reduction;
use crate::rules::traits::{ReduceTo, ReductionResult};

/// Result of reducing ShortestCommonSupersequence to ILP.
///
/// Variable layout:
/// - x_{p,a} at index `p*k + a` for p in 0..bound, a in 0..alphabet_size
/// - m_{gc,p} at index `bound*k + gc*bound + p`, where gc is a global
///   character index over the concatenation of all input strings
#[derive(Debug, Clone)]
pub struct ReductionSCSToILP {
    target: ILP,
    // Length of the supersequence being sought.
    bound: usize,
    // Number of distinct symbols; stride of the x-variable block.
    alphabet_size: usize,
}

impl ReductionResult for ReductionSCSToILP {
    type Source = ShortestCommonSupersequence;
    type Target = ILP;

    fn target_problem(&self) -> &ILP {
        &self.target
    }

    /// At each position p, output the unique symbol a with x_{p,a} = 1.
    fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
        let b = self.bound;
        let k = self.alphabet_size;
        (0..b)
            .map(|p| {
                (0..k)
                    .find(|&a| target_solution[p * k + a] == 1)
                    .unwrap_or(0)
            })
            .collect()
    }
}

#[reduction(
    overhead = {
        num_vars = "bound * alphabet_size + total_length * bound",
        num_constraints = "bound + total_length + total_length * bound + total_length",
    }
)]
impl ReduceTo<ILP> for ShortestCommonSupersequence {
    type Result = ReductionSCSToILP;

    fn reduce_to(&self) -> Self::Result {
        let b = self.bound();
        let k = self.alphabet_size();
        let strings = self.strings();

        // Variable layout:
        // x_{p,a}: position p carries symbol a, index p*k + a for p in 0..b, a in 0..k
        // m_{s,j,p}: j-th char of string s matched to position p
        // We flatten (s,j) into a global character index.
        let x_count = b * k;

        // Build global char index: for string s, char j, the global index is sum of lengths before s + j
        let mut char_offsets = Vec::with_capacity(strings.len());
        let mut total_chars = 0usize;
        for s_str in strings {
            char_offsets.push(total_chars);
            total_chars += s_str.len();
        }

        // m_{global_char, p}: index x_count + global_char * b + p
        let m_offset = x_count;
        let num_vars = x_count + total_chars * b;

        let mut constraints = Vec::new();

        // 1. One-hot symbol at each position: Σ_a x_{p,a} = 1 ∀ p
        for p in 0..b {
            let terms: Vec<(usize, f64)> = (0..k).map(|a| (p * k + a, 1.0)).collect();
            constraints.push(LinearConstraint::eq(terms, 1.0));
        }

        // 2. Each character matched to exactly one position: Σ_p m_{gc,p} = 1
        for gc in 0..total_chars {
            let terms: Vec<(usize, f64)> = (0..b).map(|p| (m_offset + gc * b + p, 1.0)).collect();
            constraints.push(LinearConstraint::eq(terms, 1.0));
        }

        // 3. Symbol consistency: m_{gc,p} <= x_{p,a} where a is the symbol at gc
        //    (a character can only match a position that carries its symbol)
        for (s_idx, s_str) in strings.iter().enumerate() {
            for (j, &sym) in s_str.iter().enumerate() {
                let gc = char_offsets[s_idx] + j;
                for p in 0..b {
                    // m_{gc,p} <= x_{p,sym}
                    constraints.push(LinearConstraint::le(
                        vec![(m_offset + gc * b + p, 1.0), (p * k + sym, -1.0)],
                        0.0,
                    ));
                }
            }
        }

        // 4. Monotonicity: matching positions strictly increase within each string.
        //    For consecutive chars j and j+1 of string s:
        //    Σ_p p * m_{gc_j,p} < Σ_p p * m_{gc_{j+1},p}
        //    i.e., Σ_p p * m_{gc_{j+1},p} - Σ_p p * m_{gc_j,p} >= 1
        for (s_idx, s_str) in strings.iter().enumerate() {
            for j in 0..s_str.len().saturating_sub(1) {
                let gc_j = char_offsets[s_idx] + j;
                let gc_next = char_offsets[s_idx] + j + 1;
                let mut terms = Vec::new();
                for p in 0..b {
                    terms.push((m_offset + gc_next * b + p, p as f64));
                    terms.push((m_offset + gc_j * b + p, -(p as f64)));
                }
                constraints.push(LinearConstraint::ge(terms, 1.0));
            }
        }

        // Pure feasibility problem: empty objective.
        let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize);
        ReductionSCSToILP {
            target,
            bound: b,
            alphabet_size: k,
        }
    }
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_rule_example_specs() -> Vec<crate::example_db::specs::RuleExampleSpec> {
    use crate::export::SolutionPair;
    vec![crate::example_db::specs::RuleExampleSpec {
        id: "shortestcommonsupersequence_to_ilp",
        build: || {
            // Alphabet {0,1}, strings [0,1] and [1,0], bound 3
            let source = ShortestCommonSupersequence::new(2, vec![vec![0, 1], vec![1, 0]], 3);
            let reduction: ReductionSCSToILP = ReduceTo::<ILP>::reduce_to(&source);
            let target_config = {
                let ilp_solver = crate::solvers::ILPSolver::new();
                ilp_solver
                    .solve(reduction.target_problem())
                    .expect("ILP should be solvable")
            };
            let source_config = reduction.extract_solution(&target_config);
            crate::example_db::specs::rule_example_with_witness::<_, ILP>(
                source,
                SolutionPair {
                    source_config,
                    target_config,
                },
            )
        },
    }]
}

#[cfg(test)]
#[path = "../unit_tests/rules/shortestcommonsupersequence_ilp.rs"]
mod tests;

//! Reduction from SparseMatrixCompression to ILP.
//!
//! Assign each row one shift value and forbid any pair of shifted 1-entries
//! from colliding in the storage vector.

use crate::models::algebraic::{LinearConstraint, ObjectiveSense, SparseMatrixCompression, ILP};
use crate::reduction;
use crate::rules::traits::{ReduceTo, ReductionResult};

/// Result of reducing SparseMatrixCompression to ILP.
///
/// Variable layout: x_{r,g} at index `r*bound_k + g`, meaning row r uses
/// zero-based shift g.
#[derive(Debug, Clone)]
pub struct ReductionSMCToILP {
    target: ILP,
    num_rows: usize,
    bound_k: usize,
}

impl ReductionResult for ReductionSMCToILP {
    type Source = SparseMatrixCompression;
    type Target = ILP;

    fn target_problem(&self) -> &ILP {
        &self.target
    }

    fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
        // For each row r, output the unique zero-based shift g with x_{r,g} = 1
        (0..self.num_rows)
            .map(|r| {
                (0..self.bound_k)
                    .find(|&g| target_solution[r * self.bound_k + g] == 1)
                    .unwrap_or(0)
            })
            .collect()
    }
}

#[reduction(
    overhead = {
        num_vars = "num_rows * bound_k",
        num_constraints = "num_rows + num_rows * num_rows * bound_k * bound_k",
    }
)]
impl ReduceTo<ILP> for SparseMatrixCompression {
    type Result = ReductionSMCToILP;

    fn reduce_to(&self) -> Self::Result {
        let m = self.num_rows();
        let n = self.num_cols();
        let k = self.bound_k();

        // Variable layout:
        // x_{r,g}: m*K binary variables at [0, m*K)
        // x_{r*K + g} = 1 iff row r uses shift g (zero-based)
        let num_vars = m * k;
        let mut constraints = Vec::new();

        // Each row assigned exactly one shift
        for r in 0..m {
            let terms: Vec<(usize, f64)> = (0..k).map(|g| (r * k + g, 1.0)).collect();
            constraints.push(LinearConstraint::eq(terms, 1.0));
        }

        // Collision constraints:
        // x_{r,g} + x_{s,h} <= 1 whenever A_{r,i} = A_{s,j} = 1 and i + g = j + h.
        // Only distinct rows r < s need constraints: two 1-entries of the SAME
        // row share a single shift, so i + g = j + g would force i = j — a
        // same-row collision is impossible by construction.
        for r in 0..m {
            for s in (r + 1)..m {
                for i in 0..n {
                    if !self.matrix()[r][i] {
                        continue;
                    }
                    for j in 0..n {
                        if !self.matrix()[s][j] {
                            continue;
                        }
                        // Collision when i + g = j + h, i.e., g - h = j - i
                        for g in 0..k {
                            // h = g + i - j (must be in [0, k))
                            let gi = g + i;
                            if gi < j {
                                continue;
                            }
                            let h = gi - j;
                            if h >= k {
                                continue;
                            }
                            constraints.push(LinearConstraint::le(
                                vec![(r * k + g, 1.0), (s * k + h, 1.0)],
                                1.0,
                            ));
                        }
                    }
                }
            }
        }

        // Pure feasibility problem: empty objective.
        let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize);
        ReductionSMCToILP {
            target,
            num_rows: m,
            bound_k: k,
        }
    }
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_rule_example_specs() -> Vec<crate::example_db::specs::RuleExampleSpec> {
    use crate::export::SolutionPair;
    vec![crate::example_db::specs::RuleExampleSpec {
        id: "sparsematrixcompression_to_ilp",
        build: || {
            let source = SparseMatrixCompression::new(
                vec![
                    vec![true, false, false, true],
                    vec![false, true, false, false],
                    vec![false, false, true, false],
                    vec![true, false, false, false],
                ],
                2,
            );
            let reduction: ReductionSMCToILP = ReduceTo::<ILP>::reduce_to(&source);
            let ilp_solver = crate::solvers::ILPSolver::new();
            let target_config = ilp_solver
                .solve(reduction.target_problem())
                .expect("ILP should be solvable");
            let extracted = reduction.extract_solution(&target_config);
            crate::example_db::specs::rule_example_with_witness::<_, ILP>(
                source,
                SolutionPair {
                    source_config: extracted,
                    target_config,
                },
            )
        },
    }]
}

#[cfg(test)]
#[path = "../unit_tests/rules/sparsematrixcompression_ilp.rs"]
mod tests;

//! Reduction from StackerCrane to ILP.
//!
//! One-hot position assignment for required arcs with McCormick products
//! for consecutive-pair costs. Uses precomputed shortest-path connector
//! distances.
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::StackerCrane; +use crate::reduction; +use crate::rules::ilp_helpers::one_hot_decode; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing StackerCrane to ILP. +/// +/// Variable layout (all binary): +/// - `x_{i,p}` at index `i*m + p` for i,p in 0..m +/// - `z_{i,j,p}` at index `m^2 + p*m^2 + i*m + j` for i,j,p in 0..m +/// +/// Total: `m^2 + m^3` variables. +#[derive(Debug, Clone)] +pub struct ReductionSCToILP { + target: ILP, + num_arcs: usize, +} + +impl ReductionResult for ReductionSCToILP { + type Source = StackerCrane; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + // Decode the permutation: for each position p, find the arc a with x_{a,p} = 1 + one_hot_decode(target_solution, self.num_arcs, self.num_arcs, 0) + } +} + +#[reduction( + overhead = { + num_vars = "num_arcs * num_arcs + num_arcs * num_arcs * num_arcs", + num_constraints = "num_arcs + num_arcs + 3 * num_arcs * num_arcs * num_arcs + 1", + } +)] +impl ReduceTo> for StackerCrane { + type Result = ReductionSCToILP; + + fn reduce_to(&self) -> Self::Result { + let m = self.num_arcs(); + + if m == 0 { + return ReductionSCToILP { + target: ILP::new(0, vec![], vec![], ObjectiveSense::Minimize), + num_arcs: 0, + }; + } + + let num_vars = m * m + m * m * m; + let x_idx = |i: usize, p: usize| i * m + p; + let z_idx = |i: usize, j: usize, p: usize| m * m + p * m * m + i * m + j; + + // Compute all-pairs shortest path distances in the mixed graph + let n = self.num_vertices(); + let distances = all_pairs_shortest_paths( + n, + self.arcs(), + self.arc_lengths(), + self.edges(), + self.edge_lengths(), + ); + + let mut constraints = Vec::new(); + + // Each arc assigned to exactly one position: sum_p x_{i,p} = 1 for all i + for i in 0..m { + let terms: Vec<(usize, f64)> = (0..m).map(|p| 
(x_idx(i, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Each position assigned exactly one arc: sum_i x_{i,p} = 1 for all p + for p in 0..m { + let terms: Vec<(usize, f64)> = (0..m).map(|i| (x_idx(i, p), 1.0)).collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // McCormick linearization for z_{i,j,p} = x_{i,p} * x_{j,(p+1) mod m} + for p in 0..m { + let next_p = (p + 1) % m; + for i in 0..m { + for j in 0..m { + let head_i = self.arcs()[i].1; + let tail_j = self.arcs()[j].0; + + if distances[head_i][tail_j] == i64::MAX { + // Infeasible pair: z_{i,j,p} = 0 + constraints.push(LinearConstraint::eq(vec![(z_idx(i, j, p), 1.0)], 0.0)); + } else { + // z <= x_{i,p} + constraints.push(LinearConstraint::le( + vec![(z_idx(i, j, p), 1.0), (x_idx(i, p), -1.0)], + 0.0, + )); + // z <= x_{j, next_p} + constraints.push(LinearConstraint::le( + vec![(z_idx(i, j, p), 1.0), (x_idx(j, next_p), -1.0)], + 0.0, + )); + // z >= x_{i,p} + x_{j, next_p} - 1 + constraints.push(LinearConstraint::le( + vec![ + (x_idx(i, p), 1.0), + (x_idx(j, next_p), 1.0), + (z_idx(i, j, p), -1.0), + ], + 1.0, + )); + } + } + } + } + + // Bound constraint: + // sum_i l_i + sum_p sum_i sum_j D[head_i, tail_j] * z_{i,j,p} <= B + let mut bound_terms = Vec::new(); + let arc_length_sum: f64 = self.arc_lengths().iter().map(|&l| l as f64).sum(); + for p in 0..m { + for i in 0..m { + for j in 0..m { + let head_i = self.arcs()[i].1; + let tail_j = self.arcs()[j].0; + let dist = distances[head_i][tail_j]; + if dist < i64::MAX { + bound_terms.push((z_idx(i, j, p), dist as f64)); + } + } + } + } + // We can't add a constant to the LHS in LinearConstraint, so move it to RHS + // sum D*z <= B - sum l_i + let rhs = self.bound() as f64 - arc_length_sum; + constraints.push(LinearConstraint::le(bound_terms, rhs)); + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionSCToILP { + target, + num_arcs: m, + } + } +} + +/// 
All-pairs shortest paths via Floyd-Warshall on the mixed graph. +fn all_pairs_shortest_paths( + n: usize, + arcs: &[(usize, usize)], + arc_lengths: &[i32], + edges: &[(usize, usize)], + edge_lengths: &[i32], +) -> Vec> { + let mut dist = vec![vec![i64::MAX; n]; n]; + for (i, row) in dist.iter_mut().enumerate() { + row[i] = 0; + } + + // Directed arcs + for (&(u, v), &length) in arcs.iter().zip(arc_lengths) { + let cost = i64::from(length); + if cost < dist[u][v] { + dist[u][v] = cost; + } + } + + // Undirected edges (both directions) + for (&(u, v), &length) in edges.iter().zip(edge_lengths) { + let cost = i64::from(length); + if cost < dist[u][v] { + dist[u][v] = cost; + } + if cost < dist[v][u] { + dist[v][u] = cost; + } + } + + // Floyd-Warshall + for via in 0..n { + for src in 0..n { + if dist[src][via] == i64::MAX { + continue; + } + for dst in 0..n { + if dist[via][dst] == i64::MAX { + continue; + } + let through = dist[src][via] + dist[via][dst]; + if through < dist[src][dst] { + dist[src][dst] = through; + } + } + } + } + + dist +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::rules::ReduceTo as _; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "stackercrane_to_ilp", + build: || { + // Simple: 3 vertices, 2 arcs, 1 edge + let source = StackerCrane::new( + 3, + vec![(0, 1), (2, 0)], + vec![(1, 2)], + vec![1, 1], + vec![1], + 4, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/stackercrane_ilp.rs"] +mod tests; diff --git 
//! Reduction from SteinerTreeInGraphs to ILP (Integer Linear Programming).
//!
//! Uses the rooted multi-commodity flow formulation:
//! - Variables: binary edge selectors `y_e` plus binary directed flow variables
//!   `f^t_(u,v)` for each non-root terminal `t`
//! - Constraints: flow conservation and capacity linking `f^t_(u,v) <= y_e`
//! - Objective: minimize total weight of selected edges

use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP};
use crate::models::graph::SteinerTreeInGraphs;
use crate::reduction;
use crate::rules::traits::{ReduceTo, ReductionResult};
use crate::topology::{Graph, SimpleGraph};
use crate::types::WeightElement;

/// Result of reducing SteinerTreeInGraphs to ILP.
///
/// Variable layout (all binary):
/// - `y_e` for each undirected source edge `e` (indices `0..m`)
/// - `f^t_(u,v)` and `f^t_(v,u)` for each non-root terminal `t` and each edge
///   (indices `m..m + 2m(k-1)`)
#[derive(Debug, Clone)]
pub struct ReductionSTIGToILP {
    target: ILP,
    // Number of source edges m; the first m target variables are the
    // edge selectors, which form the extracted solution.
    num_edges: usize,
}

impl ReductionResult for ReductionSTIGToILP {
    type Source = SteinerTreeInGraphs;
    type Target = ILP;

    fn target_problem(&self) -> &ILP {
        &self.target
    }

    fn extract_solution(&self, target_solution: &[usize]) -> Vec<usize> {
        // Only the edge-selector prefix matters; flow variables are auxiliary.
        target_solution[..self.num_edges].to_vec()
    }
}

#[reduction(
    overhead = {
        num_vars = "num_edges + 2 * num_edges * (num_terminals - 1)",
        num_constraints = "num_vertices * (num_terminals - 1) + 2 * num_edges * (num_terminals - 1)",
    }
)]
impl ReduceTo<ILP> for SteinerTreeInGraphs {
    type Result = ReductionSTIGToILP;

    fn reduce_to(&self) -> Self::Result {
        assert!(
            self.weights().iter().all(|&w| w > 0),
            "SteinerTreeInGraphs -> ILP requires strictly positive edge weights"
        );

        let n = self.num_vertices();
        let m = self.num_edges();
        // The first terminal is the flow root; every other terminal is a
        // unit-demand commodity routed from the root.
        let root = self.terminals()[0];
        let non_root_terminals = &self.terminals()[1..];
        let edges = self.graph().edges();
        let num_vars = m + 2 * m * non_root_terminals.len();
        let mut constraints = Vec::new();

        let edge_var = |edge_idx: usize| edge_idx;
        // dir 0 encodes flow u -> v along edge (u, v); dir 1 encodes v -> u.
        let flow_var = |terminal_pos: usize, edge_idx: usize, dir: usize| -> usize {
            m + terminal_pos * 2 * m + 2 * edge_idx + dir
        };

        // Flow conservation for each non-root terminal commodity:
        // net inflow is -1 at the root, +1 at the terminal, 0 elsewhere.
        for (terminal_pos, &terminal) in non_root_terminals.iter().enumerate() {
            for vertex in 0..n {
                let mut terms = Vec::new();
                for (edge_idx, &(u, v)) in edges.iter().enumerate() {
                    if v == vertex {
                        terms.push((flow_var(terminal_pos, edge_idx, 0), 1.0));
                        terms.push((flow_var(terminal_pos, edge_idx, 1), -1.0));
                    }
                    if u == vertex {
                        terms.push((flow_var(terminal_pos, edge_idx, 0), -1.0));
                        terms.push((flow_var(terminal_pos, edge_idx, 1), 1.0));
                    }
                }

                let rhs = if vertex == root {
                    -1.0
                } else if vertex == terminal {
                    1.0
                } else {
                    0.0
                };
                constraints.push(LinearConstraint::eq(terms, rhs));
            }
        }

        // Capacity linking: f^t_{e,dir} <= y_e
        // (flow may only use selected edges)
        for terminal_pos in 0..non_root_terminals.len() {
            for edge_idx in 0..m {
                let selector = edge_var(edge_idx);
                constraints.push(LinearConstraint::le(
                    vec![(flow_var(terminal_pos, edge_idx, 0), 1.0), (selector, -1.0)],
                    0.0,
                ));
                constraints.push(LinearConstraint::le(
                    vec![(flow_var(terminal_pos, edge_idx, 1), 1.0), (selector, -1.0)],
                    0.0,
                ));
            }
        }

        // Objective: minimize total weight of selected edges
        let edge_weights = self.weights();
        let objective: Vec<(usize, f64)> = edge_weights
            .iter()
            .enumerate()
            .map(|(edge_idx, w)| (edge_var(edge_idx), w.to_sum() as f64))
            .collect();

        let target = ILP::new(num_vars, constraints, objective, ObjectiveSense::Minimize);

        ReductionSTIGToILP {
            target,
            num_edges: m,
        }
    }
}

#[cfg(feature = "example-db")]
pub(crate) fn canonical_rule_example_specs() -> Vec<crate::example_db::specs::RuleExampleSpec> {
    use crate::export::SolutionPair;
    use crate::rules::ReduceTo as _;

    vec![crate::example_db::specs::RuleExampleSpec {
        id: "steinertreeingraphs_to_ilp",
        build: || {
            // 4 vertices, 4 edges, 2 terminals
            // ILP: 4 + 2*4*1 = 12 binary variables = 4096 configs
            let source = SteinerTreeInGraphs::new(
                SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (0, 3)]),
                vec![0, 2],
                vec![1, 1, 1, 3],
            );
            let reduction = ReduceTo::<ILP>::reduce_to(&source);
            let ilp_solution = crate::solvers::ILPSolver::new()
                .solve(reduction.target_problem())
                .expect("canonical example must be solvable");
            let source_config = reduction.extract_solution(&ilp_solution);
            crate::example_db::specs::rule_example_with_witness::<_, ILP>(
                source,
                SolutionPair {
                    source_config,
                    target_config: ilp_solution,
                },
            )
        },
    }]
}

#[cfg(test)]
#[path = "../unit_tests/rules/steinertreeingraphs_ilp.rs"]
mod tests;

//! Reduction from StringToStringCorrection to ILP (Integer Linear Programming).
//!
//! A time-expanded ILP with state variables z_{t,p,i} tracking token positions,
//! emptiness bits e_{t,p}, and operation selectors (delete, swap, no-op) at
//! each of K stages.

use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP};
use crate::models::misc::StringToStringCorrection;
use crate::reduction;
use crate::rules::traits::{ReduceTo, ReductionResult};

/// Result of reducing StringToStringCorrection to ILP.
#[derive(Debug, Clone)]
pub struct ReductionSTSCToILP {
    target: ILP,
    n: usize,
    bound: usize,
}

// Index helper functions (free functions to avoid `Self::` ambiguity in trait impls).
+fn idx_z(n: usize, t: usize, p: usize, i: usize) -> usize { + t * n * n + p * n + i +} + +fn idx_e(n: usize, k: usize, t: usize, p: usize) -> usize { + (k + 1) * n * n + t * n + p +} + +fn idx_d(n: usize, k: usize, t: usize, j: usize) -> usize { + (k + 1) * (n * n + n) + (t - 1) * n + j +} + +fn idx_s(n: usize, k: usize, t: usize, j: usize) -> usize { + let nm1 = n.saturating_sub(1); + (k + 1) * (n * n + n) + k * n + (t - 1) * nm1 + j +} + +fn idx_nu(n: usize, k: usize, t: usize) -> usize { + let nm1 = n.saturating_sub(1); + (k + 1) * (n * n + n) + k * n + k * nm1 + (t - 1) +} + +fn total_vars(n: usize, k: usize) -> usize { + let nm1 = n.saturating_sub(1); + (k + 1) * n * n + (k + 1) * n + k * n + k * nm1 + k +} + +impl ReductionResult for ReductionSTSCToILP { + type Source = StringToStringCorrection; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract operation sequence from ILP solution. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.n; + let k = self.bound; + let noop_code = 2 * n; + + if n == 0 { + return vec![noop_code; k]; + } + + let nm1 = n.saturating_sub(1); + let mut ops = Vec::with_capacity(k); + + for t in 1..=k { + // current length at step t-1 + let current_len = (0..n) + .filter(|&p| target_solution[idx_e(n, k, t - 1, p)] == 0) + .count(); + + if target_solution[idx_nu(n, k, t)] == 1 { + ops.push(noop_code); + } else { + let mut found = false; + for j in 0..n { + if target_solution[idx_d(n, k, t, j)] == 1 { + ops.push(j); + found = true; + break; + } + } + if !found { + for j in 0..nm1 { + if target_solution[idx_s(n, k, t, j)] == 1 { + ops.push(current_len + j); + found = true; + break; + } + } + if !found { + ops.push(noop_code); + } + } + } + } + ops + } +} + +#[reduction( + overhead = { + num_vars = "(bound + 1) * source_length * source_length + (bound + 1) * source_length + 2 * bound * source_length", + num_constraints = "(bound + 1) * source_length * source_length", + } 
+)] +impl ReduceTo> for StringToStringCorrection { + type Result = ReductionSTSCToILP; + + #[allow(clippy::needless_range_loop)] + fn reduce_to(&self) -> Self::Result { + let n = self.source_length(); + let m = self.target_length(); + let k = self.bound(); + let source = self.source(); + let target = self.target(); + + // If infeasible by length check, return trivially infeasible ILP + if m > n || m < n.saturating_sub(k) { + return ReductionSTSCToILP { + target: ILP::new( + 0, + vec![LinearConstraint::le(vec![], -1.0)], + vec![], + ObjectiveSense::Minimize, + ), + n, + bound: k, + }; + } + + // n == 0 edge case: source and target both empty, all no-ops + if n == 0 { + let nv = k; + let mut constraints = Vec::new(); + for t in 1..=k { + constraints.push(LinearConstraint::eq(vec![(t - 1, 1.0)], 1.0)); + } + return ReductionSTSCToILP { + target: ILP::new(nv, constraints, vec![], ObjectiveSense::Minimize), + n, + bound: k, + }; + } + + let nm1 = n.saturating_sub(1); + let nv = total_vars(n, k); + + let mut constraints = Vec::new(); + + // === State validity === + + // e_{t,p} + Σ_i z_{t,p,i} = 1 ∀ t,p + for t in 0..=k { + for p in 0..n { + let mut terms = vec![(idx_e(n, k, t, p), 1.0)]; + for i in 0..n { + terms.push((idx_z(n, t, p, i), 1.0)); + } + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + } + + // Σ_p z_{t,p,i} <= 1 ∀ t,i + for t in 0..=k { + for i in 0..n { + let terms: Vec<(usize, f64)> = (0..n).map(|p| (idx_z(n, t, p, i), 1.0)).collect(); + constraints.push(LinearConstraint::le(terms, 1.0)); + } + } + + // e_{t,p} <= e_{t,p+1} ∀ t, p < n-1 + for t in 0..=k { + for p in 0..nm1 { + constraints.push(LinearConstraint::le( + vec![(idx_e(n, k, t, p), 1.0), (idx_e(n, k, t, p + 1), -1.0)], + 0.0, + )); + } + } + + // === Initial state === + for p in 0..n { + constraints.push(LinearConstraint::eq(vec![(idx_z(n, 0, p, p), 1.0)], 1.0)); + for i in 0..n { + if i != p { + constraints.push(LinearConstraint::eq(vec![(idx_z(n, 0, p, i), 1.0)], 0.0)); + } + } + 
constraints.push(LinearConstraint::eq(vec![(idx_e(n, k, 0, p), 1.0)], 0.0)); + } + + // === Operation choice === + for t in 1..=k { + let mut terms = Vec::new(); + for j in 0..n { + terms.push((idx_d(n, k, t, j), 1.0)); + } + for j in 0..nm1 { + terms.push((idx_s(n, k, t, j), 1.0)); + } + terms.push((idx_nu(n, k, t), 1.0)); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + // Legality + for t in 1..=k { + for j in 0..n { + constraints.push(LinearConstraint::le( + vec![(idx_d(n, k, t, j), 1.0), (idx_e(n, k, t - 1, j), 1.0)], + 1.0, + )); + } + for j in 0..nm1 { + constraints.push(LinearConstraint::le( + vec![(idx_s(n, k, t, j), 1.0), (idx_e(n, k, t - 1, j), 1.0)], + 1.0, + )); + constraints.push(LinearConstraint::le( + vec![(idx_s(n, k, t, j), 1.0), (idx_e(n, k, t - 1, j + 1), 1.0)], + 1.0, + )); + } + } + + // === State-update (M=1 big-M) === + for t in 1..=k { + for p in 0..n { + for i in 0..n { + // No-op + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t, p, i), 1.0), + (idx_z(n, t - 1, p, i), -1.0), + (idx_nu(n, k, t), 1.0), + ], + 1.0, + )); + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t - 1, p, i), 1.0), + (idx_z(n, t, p, i), -1.0), + (idx_nu(n, k, t), 1.0), + ], + 1.0, + )); + + // Delete at position j + for j in 0..n { + if p < j { + // Before deleted position: unchanged + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t, p, i), 1.0), + (idx_z(n, t - 1, p, i), -1.0), + (idx_d(n, k, t, j), 1.0), + ], + 1.0, + )); + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t - 1, p, i), 1.0), + (idx_z(n, t, p, i), -1.0), + (idx_d(n, k, t, j), 1.0), + ], + 1.0, + )); + } else if p + 1 < n { + // j <= p < n-1: shift from p+1 + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t, p, i), 1.0), + (idx_z(n, t - 1, p + 1, i), -1.0), + (idx_d(n, k, t, j), 1.0), + ], + 1.0, + )); + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t - 1, p + 1, i), 1.0), + (idx_z(n, t, p, i), -1.0), + (idx_d(n, k, 
t, j), 1.0), + ], + 1.0, + )); + } else { + // p == n-1: last slot must be empty + constraints.push(LinearConstraint::le( + vec![(idx_z(n, t, n - 1, i), 1.0), (idx_d(n, k, t, j), 1.0)], + 1.0, + )); + } + } + + // Swap at position j + for j in 0..nm1 { + if p != j && p != j + 1 { + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t, p, i), 1.0), + (idx_z(n, t - 1, p, i), -1.0), + (idx_s(n, k, t, j), 1.0), + ], + 1.0, + )); + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t - 1, p, i), 1.0), + (idx_z(n, t, p, i), -1.0), + (idx_s(n, k, t, j), 1.0), + ], + 1.0, + )); + } else if p == j { + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t, j, i), 1.0), + (idx_z(n, t - 1, j + 1, i), -1.0), + (idx_s(n, k, t, j), 1.0), + ], + 1.0, + )); + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t - 1, j + 1, i), 1.0), + (idx_z(n, t, j, i), -1.0), + (idx_s(n, k, t, j), 1.0), + ], + 1.0, + )); + } else { + // p == j+1 + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t, j + 1, i), 1.0), + (idx_z(n, t - 1, j, i), -1.0), + (idx_s(n, k, t, j), 1.0), + ], + 1.0, + )); + constraints.push(LinearConstraint::le( + vec![ + (idx_z(n, t - 1, j, i), 1.0), + (idx_z(n, t, j + 1, i), -1.0), + (idx_s(n, k, t, j), 1.0), + ], + 1.0, + )); + } + } + } + } + } + + // === Final state equals target === + for p in 0..m { + let terms: Vec<(usize, f64)> = (0..n) + .filter(|&i| source[i] == target[p]) + .map(|i| (idx_z(n, k, p, i), 1.0)) + .collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + for p in m..n { + constraints.push(LinearConstraint::eq(vec![(idx_e(n, k, k, p), 1.0)], 1.0)); + } + + let target_ilp = ILP::new(nv, constraints, vec![], ObjectiveSense::Minimize); + ReductionSTSCToILP { + target: target_ilp, + n, + bound: k, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + vec![crate::example_db::specs::RuleExampleSpec { + id: 
"stringtostringcorrection_to_ilp", + build: || { + // source=[0,1,0], target=[1,0], bound=1 (delete position 0) + let source = StringToStringCorrection::new(2, vec![0, 1, 0], vec![1, 0], 1); + let reduction: ReductionSTSCToILP = ReduceTo::>::reduce_to(&source); + let target_config = { + let ilp_solver = crate::solvers::ILPSolver::new(); + ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable") + }; + let source_config = reduction.extract_solution(&target_config); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/stringtostringcorrection_ilp.rs"] +mod tests; diff --git a/src/rules/strongconnectivityaugmentation_ilp.rs b/src/rules/strongconnectivityaugmentation_ilp.rs new file mode 100644 index 00000000..5680afad --- /dev/null +++ b/src/rules/strongconnectivityaugmentation_ilp.rs @@ -0,0 +1,220 @@ +//! Reduction from StrongConnectivityAugmentation to ILP. +//! +//! Select candidate arcs under the budget and certify strong connectivity by +//! sending flow both from a root to every vertex and back again. +//! See the paper entry for the full formulation. 
+ +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::StrongConnectivityAugmentation; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionSCAToILP { + target: ILP, + num_candidates: usize, +} + +impl ReductionResult for ReductionSCAToILP { + type Source = StrongConnectivityAugmentation; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_candidates].to_vec() + } +} + +#[reduction( + overhead = { + num_vars = "num_potential_arcs + 2 * num_vertices * (num_arcs + num_potential_arcs)", + num_constraints = "1 + 2 * num_vertices * num_potential_arcs + 2 * num_vertices * num_vertices", + } +)] +impl ReduceTo> for StrongConnectivityAugmentation { + type Result = ReductionSCAToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let p = self.num_potential_arcs(); + + // Trivial: n ≤ 1 already strongly connected + if n <= 1 { + let target = ILP::new(p, vec![], vec![], ObjectiveSense::Minimize); + return ReductionSCAToILP { + target, + num_candidates: p, + }; + } + + let base_arcs = self.graph().arcs(); + let m = base_arcs.len(); + let root = 0; + + // Variable layout per paper: + // y_j: j [0, p) + // f^t_i (fwd base): p + t*m + i [p, p + n*m) + // f_bar^t_j (fwd cand): p + n*m + t*p + j [p+nm, p+nm+np) + // g^t_i (bwd base): p + n*(m+p) + t*m + i [p+n(m+p), p+n(2m+p)) + // g_bar^t_j (bwd cand): p + n*(2m+p) + t*p + j [p+n(2m+p), p+2n(m+p)) + let num_vars = p + 2 * n * (m + p); + let f_base = |t: usize, i: usize| -> usize { p + t * m + i }; + let f_cand = |t: usize, j: usize| -> usize { p + n * m + t * p + j }; + let g_base = |t: usize, i: usize| -> usize { p + n * (m + p) + t * m + i }; + let g_cand = |t: usize, j: usize| -> usize { p + n * (2 * m + p) + t * p + j }; + + let mut constraints = Vec::new(); + 
+ // Binary bounds: y_j ≤ 1 + for j in 0..p { + constraints.push(LinearConstraint::le(vec![(j, 1.0)], 1.0)); + } + + // Budget: Σ w_j * y_j ≤ B + let budget_terms: Vec<(usize, f64)> = self + .candidate_arcs() + .iter() + .enumerate() + .map(|(j, &(_, _, w))| (j, w as f64)) + .collect(); + constraints.push(LinearConstraint::le(budget_terms, *self.bound() as f64)); + + for t in 0..n { + if t == root { + // Pin all flow vars to 0 for dummy commodity t = root + for i in 0..m { + constraints.push(LinearConstraint::eq(vec![(f_base(t, i), 1.0)], 0.0)); + constraints.push(LinearConstraint::eq(vec![(g_base(t, i), 1.0)], 0.0)); + } + for j in 0..p { + constraints.push(LinearConstraint::eq(vec![(f_cand(t, j), 1.0)], 0.0)); + constraints.push(LinearConstraint::eq(vec![(g_cand(t, j), 1.0)], 0.0)); + } + continue; + } + + // Activation: f_bar^t_j ≤ y_j and g_bar^t_j ≤ y_j + for j in 0..p { + constraints.push(LinearConstraint::le( + vec![(f_cand(t, j), 1.0), (j, -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(g_cand(t, j), 1.0), (j, -1.0)], + 0.0, + )); + } + + // Forward flow conservation (root → t): for each vertex v + for v in 0..n { + let mut terms: Vec<(usize, f64)> = Vec::new(); + + // Base arcs + for (i, &(u_a, v_a)) in base_arcs.iter().enumerate() { + if u_a == v { + terms.push((f_base(t, i), 1.0)); // outgoing + } + if v_a == v { + terms.push((f_base(t, i), -1.0)); // incoming + } + } + + // Candidate arcs + for (j, &(sj, tj, _)) in self.candidate_arcs().iter().enumerate() { + if sj == v { + terms.push((f_cand(t, j), 1.0)); // outgoing + } + if tj == v { + terms.push((f_cand(t, j), -1.0)); // incoming + } + } + + let rhs = if v == root { + 1.0 + } else if v == t { + -1.0 + } else { + 0.0 + }; + constraints.push(LinearConstraint::eq(terms, rhs)); + } + + // Backward flow conservation (t → root): for each vertex v + for v in 0..n { + let mut terms: Vec<(usize, f64)> = Vec::new(); + + // Base arcs + for (i, &(u_a, v_a)) in base_arcs.iter().enumerate() 
{ + if u_a == v { + terms.push((g_base(t, i), 1.0)); + } + if v_a == v { + terms.push((g_base(t, i), -1.0)); + } + } + + // Candidate arcs + for (j, &(sj, tj, _)) in self.candidate_arcs().iter().enumerate() { + if sj == v { + terms.push((g_cand(t, j), 1.0)); + } + if tj == v { + terms.push((g_cand(t, j), -1.0)); + } + } + + let rhs = if v == t { + 1.0 // source of backward flow + } else if v == root { + -1.0 // sink of backward flow + } else { + 0.0 + }; + constraints.push(LinearConstraint::eq(terms, rhs)); + } + } + + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + ReductionSCAToILP { + target, + num_candidates: p, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::topology::DirectedGraph; + vec![crate::example_db::specs::RuleExampleSpec { + id: "strongconnectivityaugmentation_to_ilp", + build: || { + // Path 0→1→2, candidates: (2,0,1),(1,0,2), bound=2 + let source = StrongConnectivityAugmentation::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2)]), + vec![(2, 0, 1), (1, 0, 2)], + 2, + ); + let reduction: ReductionSCAToILP = + crate::rules::ReduceTo::>::reduce_to(&source); + let ilp_sol = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: extracted, + target_config: ilp_sol, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/strongconnectivityaugmentation_ilp.rs"] +mod tests; diff --git a/src/rules/subgraphisomorphism_ilp.rs b/src/rules/subgraphisomorphism_ilp.rs new file mode 100644 index 00000000..0dba403e --- /dev/null +++ b/src/rules/subgraphisomorphism_ilp.rs @@ -0,0 +1,131 @@ +//! Reduction from SubgraphIsomorphism to ILP (Integer Linear Programming). +//! +//! 
Injective assignment with non-edge constraints: +//! - Binary x_{v,u}: pattern vertex v maps to host vertex u +//! - Assignment: each pattern vertex to exactly one host vertex +//! - Injectivity: each host vertex receives at most one pattern vertex +//! - Non-edge forbiddance: for each pattern edge {v,w} and each host non-edge {u,u'}, +//! x_{v,u} + x_{w,u'} <= 1 AND x_{v,u'} + x_{w,u} <= 1 + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::graph::SubgraphIsomorphism; +use crate::reduction; +use crate::rules::ilp_helpers::one_hot_assignment_constraints; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::Graph; + +/// Result of reducing SubgraphIsomorphism to ILP. +/// +/// Variable layout (all binary): +/// - `x_{v,u}` at index `v * n_host + u` for pattern vertex v, host vertex u +#[derive(Debug, Clone)] +pub struct ReductionSubIsoToILP { + target: ILP, + num_pattern_vertices: usize, + num_host_vertices: usize, +} + +impl ReductionResult for ReductionSubIsoToILP { + type Source = SubgraphIsomorphism; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: for each pattern vertex v, output the unique host vertex u with x_{v,u} = 1. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n_host = self.num_host_vertices; + (0..self.num_pattern_vertices) + .map(|v| { + (0..n_host) + .find(|&u| target_solution[v * n_host + u] == 1) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "num_pattern_vertices * num_host_vertices", + num_constraints = "num_pattern_vertices + num_host_vertices + num_pattern_edges * num_host_vertices^2", + } +)] +impl ReduceTo> for SubgraphIsomorphism { + type Result = ReductionSubIsoToILP; + + fn reduce_to(&self) -> Self::Result { + let n_pat = self.num_pattern_vertices(); + let n_host = self.num_host_vertices(); + let host = self.host_graph(); + let pattern = self.pattern_graph(); + let pat_edges = pattern.edges(); + + let num_vars = n_pat * n_host; + + let mut constraints = Vec::new(); + + // Assignment constraints + constraints.extend(one_hot_assignment_constraints(n_pat, n_host, 0)); + + // Non-edge forbiddance: for each pattern edge {v,w} and each host non-edge {u,u'} + for &(v, w) in &pat_edges { + for u in 0..n_host { + for u_prime in 0..n_host { + if u == u_prime { + continue; + } + if host.has_edge(u, u_prime) { + continue; + } + // x_{v,u} + x_{w,u'} <= 1 + constraints.push(LinearConstraint::le( + vec![(v * n_host + u, 1.0), (w * n_host + u_prime, 1.0)], + 1.0, + )); + } + } + } + + // Feasibility: no objective + let target = ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize); + + ReductionSubIsoToILP { + target, + num_pattern_vertices: n_pat, + num_host_vertices: n_host, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::topology::SimpleGraph; + vec![crate::example_db::specs::RuleExampleSpec { + id: "subgraphisomorphism_to_ilp", + build: || { + // Host: C4, Pattern: P3 (path on 3 vertices embeddable in cycle) + let host = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0)]); + let pattern = 
SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let source = SubgraphIsomorphism::new(host, pattern); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/subgraphisomorphism_ilp.rs"] +mod tests; diff --git a/src/rules/timetabledesign_ilp.rs b/src/rules/timetabledesign_ilp.rs new file mode 100644 index 00000000..4cbb3f38 --- /dev/null +++ b/src/rules/timetabledesign_ilp.rs @@ -0,0 +1,131 @@ +//! Reduction from TimetableDesign to ILP. +//! +//! The source witness is a binary craftsman-task-period incidence table, +//! and all feasibility conditions are already linear: availability forcing, +//! per-period exclusivity, and exact pairwise work requirements. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::TimetableDesign; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing TimetableDesign to ILP. +/// +/// Variable layout: x_{c,t,h} at index `((c * num_tasks) + t) * num_periods + h` +/// exactly matching the source configuration layout. +#[derive(Debug, Clone)] +pub struct ReductionTDToILP { + target: ILP, +} + +impl ReductionResult for ReductionTDToILP { + type Source = TimetableDesign; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + /// Extract: direct identity mapping — the ILP variable layout matches the + /// source configuration layout exactly. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction(overhead = { + num_vars = "num_craftsmen * num_tasks * num_periods", + num_constraints = "num_craftsmen * num_periods + num_tasks * num_periods + num_craftsmen * num_tasks", +})] +impl ReduceTo> for TimetableDesign { + type Result = ReductionTDToILP; + + fn reduce_to(&self) -> Self::Result { + let nc = self.num_craftsmen(); + let nt = self.num_tasks(); + let nh = self.num_periods(); + let num_vars = nc * nt * nh; + + let var = |c: usize, t: usize, h: usize| -> usize { ((c * nt) + t) * nh + h }; + + let mut constraints = Vec::new(); + + // 1. Availability: x_{c,t,h} = 0 whenever craftsman c or task t is unavailable in h + for c in 0..nc { + for t in 0..nt { + for h in 0..nh { + if !self.craftsman_avail()[c][h] || !self.task_avail()[t][h] { + constraints.push(LinearConstraint::eq(vec![(var(c, t, h), 1.0)], 0.0)); + } + } + } + } + + // 2. Each craftsman works on at most one task per period: Σ_t x_{c,t,h} <= 1 for all c, h + for c in 0..nc { + for h in 0..nh { + let terms: Vec<(usize, f64)> = (0..nt).map(|t| (var(c, t, h), 1.0)).collect(); + constraints.push(LinearConstraint::le(terms, 1.0)); + } + } + + // 3. Each task worked on by at most one craftsman per period: Σ_c x_{c,t,h} <= 1 for all t, h + for t in 0..nt { + for h in 0..nh { + let terms: Vec<(usize, f64)> = (0..nc).map(|c| (var(c, t, h), 1.0)).collect(); + constraints.push(LinearConstraint::le(terms, 1.0)); + } + } + + // 4. 
Exact requirements: Σ_h x_{c,t,h} = r_{c,t} for all c, t + for c in 0..nc { + for t in 0..nt { + let terms: Vec<(usize, f64)> = (0..nh).map(|h| (var(c, t, h), 1.0)).collect(); + constraints.push(LinearConstraint::eq( + terms, + self.requirements()[c][t] as f64, + )); + } + } + + ReductionTDToILP { + target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "timetabledesign_to_ilp", + build: || { + // Small 2-craftsman, 2-task, 2-period instance + let source = TimetableDesign::new( + 2, + 2, + 2, + vec![vec![true, true], vec![true, true]], + vec![vec![true, true], vec![true, true]], + vec![vec![1, 0], vec![0, 1]], + ); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = crate::solvers::ILPSolver::new() + .solve(reduction.target_problem()) + .expect("canonical example must be solvable"); + let source_config = reduction.extract_solution(&ilp_solution); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config, + target_config: ilp_solution, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/timetabledesign_ilp.rs"] +mod tests; diff --git a/src/unit_tests/rules/acyclicpartition_ilp.rs b/src/unit_tests/rules/acyclicpartition_ilp.rs new file mode 100644 index 00000000..3aa3c8cc --- /dev/null +++ b/src/unit_tests/rules/acyclicpartition_ilp.rs @@ -0,0 +1,80 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::graph::AcyclicPartition; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; + +fn small_instance() -> AcyclicPartition { + // Chain 0->1->2->3, unit weights, unit arc costs, B=3, K=2 + AcyclicPartition::new( + DirectedGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + 
vec![1, 1, 1, 1], + vec![1, 1, 1], + 3, + 2, + ) +} + +#[test] +fn test_acyclicpartition_to_ilp_closed_loop() { + let source = small_instance(); + let reduction: ReductionAcyclicPartitionToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + + // Solve source with brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_witnesses(&source); + assert!(!bf_solutions.is_empty(), "source should be satisfiable"); + + // Solve ILP + let ilp_solver = ILPSolver::new(); + let ilp_sol = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + + assert!( + source.evaluate(&extracted).0, + "extracted solution must be valid" + ); +} + +#[test] +fn test_reduction_num_vars() { + let source = small_instance(); + let reduction: ReductionAcyclicPartitionToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + // n=4, m=3: n^2 + m*n + m + 2*n = 16 + 12 + 3 + 8 = 39 + assert_eq!(ilp.num_vars, 39); +} + +#[test] +fn test_extract_solution() { + let source = small_instance(); + let reduction: ReductionAcyclicPartitionToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + let ilp_sol = solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + assert_eq!(extracted.len(), 4); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_infeasible_instance() { + // Cycle 0->1->2->0, B=1, K=0. + // Each partition can hold weight <= 1 (one vertex each), + // so 3 separate partitions with crossing cost = 3 > K=0. + // Can't merge either since weight > B=1. 
+ let source = AcyclicPartition::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2), (2, 0)]), + vec![1, 1, 1], + vec![1, 1, 1], + 1, + 0, + ); + let reduction: ReductionAcyclicPartitionToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + assert!(solver.solve(ilp).is_none()); +} diff --git a/src/unit_tests/rules/balancedcompletebipartitesubgraph_ilp.rs b/src/unit_tests/rules/balancedcompletebipartitesubgraph_ilp.rs new file mode 100644 index 00000000..6ba457bd --- /dev/null +++ b/src/unit_tests/rules/balancedcompletebipartitesubgraph_ilp.rs @@ -0,0 +1,60 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::graph::BalancedCompleteBipartiteSubgraph; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::rules::ReduceTo; +use crate::topology::BipartiteGraph; +use crate::traits::Problem; + +fn small_instance() -> BalancedCompleteBipartiteSubgraph { + // L={0,1,2}, R={0,1,2} + // Edges: (0,0),(0,1),(1,0),(1,1),(2,1),(2,2) + // K_{2,2} subgraph: L={0,1}, R={0,1} + BalancedCompleteBipartiteSubgraph::new( + BipartiteGraph::new(3, 3, vec![(0, 0), (0, 1), (1, 0), (1, 1), (2, 1), (2, 2)]), + 2, + ) +} + +#[test] +fn test_balancedcompletebipartitesubgraph_to_ilp_closed_loop() { + let source = small_instance(); + let reduction: ReductionBCBSToILP = ReduceTo::>::reduce_to(&source); + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "BCBS -> ILP round trip", + ); +} + +#[test] +fn test_reduction_shape() { + let source = small_instance(); + let reduction: ReductionBCBSToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + // 6 variables (3 left + 3 right) + assert_eq!(ilp.num_vars, 6); +} + +#[test] +fn test_infeasible_instance() { + // No K_{3,3}: not all edges present + let source = BalancedCompleteBipartiteSubgraph::new( + BipartiteGraph::new(3, 3, vec![(0, 0), (0, 1), (1, 0), (1, 1)]), + 
3, + ); + let reduction: ReductionBCBSToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = crate::solvers::ILPSolver::new(); + assert!(solver.solve(ilp).is_none()); +} + +#[test] +fn test_extract_solution_identity() { + let source = small_instance(); + let reduction: ReductionBCBSToILP = ReduceTo::>::reduce_to(&source); + let target_sol = vec![1, 1, 0, 1, 1, 0]; + let extracted = reduction.extract_solution(&target_sol); + assert_eq!(extracted, vec![1, 1, 0, 1, 1, 0]); + assert!(source.evaluate(&extracted).0); +} diff --git a/src/unit_tests/rules/bicliquecover_ilp.rs b/src/unit_tests/rules/bicliquecover_ilp.rs new file mode 100644 index 00000000..e3617056 --- /dev/null +++ b/src/unit_tests/rules/bicliquecover_ilp.rs @@ -0,0 +1,63 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::graph::BicliqueCover; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::rules::ReduceTo; +use crate::topology::BipartiteGraph; +use crate::traits::Problem; + +fn small_instance() -> BicliqueCover { + // L={0,1}, R={0,1,2}, edges: (0,0),(0,1),(1,1),(1,2), k=2 + BicliqueCover::new( + BipartiteGraph::new(2, 3, vec![(0, 0), (0, 1), (1, 1), (1, 2)]), + 2, + ) +} + +#[test] +fn test_bicliquecover_to_ilp_closed_loop() { + let source = small_instance(); + let reduction: ReductionBicliqueCoverToILP = ReduceTo::>::reduce_to(&source); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "BicliqueCover -> ILP round trip", + ); +} + +#[test] +fn test_reduction_shape() { + let source = small_instance(); + let reduction: ReductionBicliqueCoverToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + // n=5, k=2, left=2, right=3 + // x vars: 5*2=10, z vars: 2*3*2=12, total=22 + assert_eq!(ilp.num_vars, 22); +} + +#[test] +fn test_ilp_solution_is_valid_cover() { + let source = small_instance(); + let reduction: 
ReductionBicliqueCoverToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = crate::solvers::ILPSolver::new(); + let ilp_sol = solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + let value = source.evaluate(&extracted); + assert!( + value.0.is_some(), + "extracted solution should be a valid cover" + ); +} + +#[test] +fn test_single_edge() { + // Single edge needs 1 biclique + let source = BicliqueCover::new(BipartiteGraph::new(1, 1, vec![(0, 0)]), 1); + let reduction: ReductionBicliqueCoverToILP = ReduceTo::>::reduce_to(&source); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "single edge biclique cover", + ); +} diff --git a/src/unit_tests/rules/biconnectivityaugmentation_ilp.rs b/src/unit_tests/rules/biconnectivityaugmentation_ilp.rs new file mode 100644 index 00000000..6ae7488c --- /dev/null +++ b/src/unit_tests/rules/biconnectivityaugmentation_ilp.rs @@ -0,0 +1,79 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::graph::BiconnectivityAugmentation; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn small_instance() -> BiconnectivityAugmentation { + // Path 0-1-2-3, candidates: (0,2,1),(0,3,2),(1,3,1), budget=3 + BiconnectivityAugmentation::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![(0, 2, 1), (0, 3, 2), (1, 3, 1)], + 3, + ) +} + +#[test] +fn test_biconnectivityaugmentation_to_ilp_closed_loop() { + let source = small_instance(); + let reduction: ReductionBiconnAugToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + + // Solve source with brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_witnesses(&source); + assert!(!bf_solutions.is_empty(), "source should be satisfiable"); + + // Solve ILP + let ilp_solver = ILPSolver::new(); + let 
ilp_sol = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + + assert!( + source.evaluate(&extracted).0, + "extracted solution must be valid" + ); +} + +#[test] +fn test_extract_solution() { + let source = small_instance(); + let reduction: ReductionBiconnAugToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + let ilp_sol = solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + assert_eq!(extracted.len(), 3); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_trivial_single_vertex() { + let source = BiconnectivityAugmentation::new(SimpleGraph::new(1, vec![]), vec![], 0); + let reduction: ReductionBiconnAugToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + let ilp_sol = solver.solve(ilp).expect("trivial ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_already_biconnected() { + // Triangle is already biconnected + let source = BiconnectivityAugmentation::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![], + 0, + ); + let reduction: ReductionBiconnAugToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + let ilp_sol = solver + .solve(ilp) + .expect("already biconnected should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + assert!(source.evaluate(&extracted).0); +} diff --git a/src/unit_tests/rules/bmf_ilp.rs b/src/unit_tests/rules/bmf_ilp.rs new file mode 100644 index 00000000..e571d768 --- /dev/null +++ b/src/unit_tests/rules/bmf_ilp.rs @@ -0,0 +1,58 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use 
crate::rules::{ReduceTo, ReductionResult}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; + +#[test] +fn test_bmf_to_ilp_structure() { + // 2x2 identity matrix, rank 1 + let problem = BMF::new(vec![vec![true, false], vec![false, true]], 1); + let reduction: ReductionBMFToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // b: 2*1=2, c: 1*2=2, p: 2*1*2=4, w: 2*2=4, e: 2*2=4 => 16 + assert_eq!(ilp.num_vars, 16); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_bmf_to_ilp_closed_loop() { + // 2x2 identity, rank 2 — exact factorization exists + let problem = BMF::new(vec![vec![true, false], vec![false, true]], 2); + let reduction: ReductionBMFToILP = ReduceTo::>::reduce_to(&problem); + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "BMF->ILP closed loop", + ); +} + +#[test] +fn test_bmf_to_ilp_bf_vs_ilp() { + let problem = BMF::new(vec![vec![true, true], vec![true, false]], 1); + let reduction: ReductionBMFToILP = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should have a witness"); + let bf_value = problem.evaluate(&bf_witness); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); +} + +#[test] +fn test_bmf_to_ilp_trivial() { + // 1x1 matrix, rank 1 + let problem = BMF::new(vec![vec![true]], 1); + let reduction: ReductionBMFToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // b: 1, c: 1, p: 1, w: 1, e: 1 => 5 + assert_eq!(ilp.num_vars, 5); +} diff --git a/src/unit_tests/rules/bottlenecktravelingsalesman_ilp.rs b/src/unit_tests/rules/bottlenecktravelingsalesman_ilp.rs new file mode 100644 index 
00000000..fbe2a0e4 --- /dev/null +++ b/src/unit_tests/rules/bottlenecktravelingsalesman_ilp.rs @@ -0,0 +1,98 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn k4_btsp() -> BottleneckTravelingSalesman { + BottleneckTravelingSalesman::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]), + vec![1, 3, 2, 4, 2, 1], + ) +} + +#[test] +fn test_reduction_creates_valid_ilp() { + let problem = k4_btsp(); + let reduction: ReductionBTSPToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // n=4, m=6: num_x=16, num_z=2*6*4=48, b=1, total=65 + assert_eq!(ilp.num_vars, 65); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_bottlenecktravelingsalesman_to_ilp_closed_loop() { + let problem = k4_btsp(); + let bf = BruteForce::new(); + let bf_solution = bf.find_witness(&problem).expect("brute-force optimum"); + let bf_value = problem.evaluate(&bf_solution); + + let reduction: ReductionBTSPToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert!( + ilp_value.is_valid(), + "Extracted solution should be a valid Hamiltonian cycle" + ); + assert_eq!( + ilp_value, bf_value, + "ILP and brute-force should agree on optimal value" + ); +} + +#[test] +fn test_bottlenecktravelingsalesman_to_ilp_c4() { + // C4 with varying weights: bottleneck = max weight in the only cycle + let problem = BottleneckTravelingSalesman::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0)]), + vec![1, 2, 3, 4], + ); + let bf = BruteForce::new(); + let bf_solution = bf.find_witness(&problem).expect("brute-force optimum"); + let bf_value = problem.evaluate(&bf_solution); + + let reduction: 
ReductionBTSPToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert!(ilp_value.is_valid()); + assert_eq!(ilp_value, bf_value); +} + +#[test] +fn test_solution_extraction() { + let problem = k4_btsp(); + let reduction: ReductionBTSPToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let metric = problem.evaluate(&extracted); + assert!(metric.is_valid()); +} + +#[test] +fn test_no_hamiltonian_cycle_infeasible() { + // Path graph: no Hamiltonian cycle + let problem = BottleneckTravelingSalesman::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1, 1, 1], + ); + let reduction: ReductionBTSPToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(reduction.target_problem()); + assert!( + result.is_none(), + "Path graph should have no Hamiltonian cycle" + ); +} diff --git a/src/unit_tests/rules/boundedcomponentspanningforest_ilp.rs b/src/unit_tests/rules/boundedcomponentspanningforest_ilp.rs new file mode 100644 index 00000000..08836b80 --- /dev/null +++ b/src/unit_tests/rules/boundedcomponentspanningforest_ilp.rs @@ -0,0 +1,85 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::graph::BoundedComponentSpanningForest; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn small_instance() -> BoundedComponentSpanningForest { + // Path 0-1-2-3, weights [1,2,2,1], K=2, B=4 + BoundedComponentSpanningForest::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1, 
2, 2, 1], + 2, + 4, + ) +} + +#[test] +fn test_boundedcomponentspanningforest_to_ilp_closed_loop() { + let source = small_instance(); + let reduction: ReductionBCSFToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + + // Solve source with brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_witnesses(&source); + assert!(!bf_solutions.is_empty(), "source should be satisfiable"); + + // Solve ILP + let ilp_solver = ILPSolver::new(); + let ilp_sol = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + + assert!( + source.evaluate(&extracted).0, + "extracted solution must be valid" + ); +} + +#[test] +fn test_extract_solution() { + let source = small_instance(); + let reduction: ReductionBCSFToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + let ilp_sol = solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + assert_eq!(extracted.len(), 4); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_single_component() { + // All in one component + let source = BoundedComponentSpanningForest::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2)]), + vec![1, 1, 1], + 1, + 3, + ); + let reduction: ReductionBCSFToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + let ilp_sol = solver + .solve(ilp) + .expect("single component should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_infeasible_instance() { + // 4 vertices, weights [3,3,3,3], K=2, B=5 -> total weight 12, max per component 5, need at least 3 components + let source = BoundedComponentSpanningForest::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![3, 3, 3, 3], + 2, + 5, + ); + let reduction: ReductionBCSFToILP = 
ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + assert!(solver.solve(ilp).is_none()); +} diff --git a/src/unit_tests/rules/consecutiveblockminimization_ilp.rs b/src/unit_tests/rules/consecutiveblockminimization_ilp.rs new file mode 100644 index 00000000..42a2be73 --- /dev/null +++ b/src/unit_tests/rules/consecutiveblockminimization_ilp.rs @@ -0,0 +1,64 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::rules::{ReduceTo, ReductionResult}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_cbm_to_ilp_structure() { + let problem = ConsecutiveBlockMinimization::new( + vec![vec![true, false, true], vec![false, true, true]], + 2, + ); + let reduction: ReductionCBMToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // x: 3*3=9, a: 2*3=6, b: 2*3=6 => 21 + assert_eq!(ilp.num_vars, 21); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_cbm_to_ilp_closed_loop() { + let problem = ConsecutiveBlockMinimization::new( + vec![vec![true, false, true], vec![false, true, true]], + 2, + ); + let reduction: ReductionCBMToILP = ReduceTo::>::reduce_to(&problem); + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "ConsecutiveBlockMinimization->ILP closed loop", + ); +} + +#[test] +fn test_cbm_to_ilp_bf_vs_ilp() { + let problem = ConsecutiveBlockMinimization::new( + vec![vec![true, false, true], vec![false, true, true]], + 2, + ); + let reduction: ReductionCBMToILP = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + 
.solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_cbm_to_ilp_trivial() { + // 1x1 matrix, bound 1 + let problem = ConsecutiveBlockMinimization::new(vec![vec![true]], 1); + let reduction: ReductionCBMToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // x: 1, a: 1, b: 1 => 3 + assert_eq!(ilp.num_vars, 3); +} diff --git a/src/unit_tests/rules/consecutiveonesmatrixaugmentation_ilp.rs b/src/unit_tests/rules/consecutiveonesmatrixaugmentation_ilp.rs new file mode 100644 index 00000000..0c7f7d62 --- /dev/null +++ b/src/unit_tests/rules/consecutiveonesmatrixaugmentation_ilp.rs @@ -0,0 +1,71 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::{ReduceTo, ReductionResult}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_coma_to_ilp_structure() { + let problem = ConsecutiveOnesMatrixAugmentation::new( + vec![vec![true, false, true], vec![false, true, true]], + 1, + ); + let reduction: ReductionCOMAToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // x: 3*3=9, a+l+u+h+f: 5*2*3=30 => 39 + assert_eq!(ilp.num_vars, 39); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_coma_to_ilp_closed_loop() { + let problem = ConsecutiveOnesMatrixAugmentation::new( + vec![vec![true, false, true], vec![false, true, true]], + 1, + ); + let reduction: ReductionCOMAToILP = ReduceTo::>::reduce_to(&problem); + + // Use ILP solver instead of brute-force on the target (39 binary vars too large) + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); + 
+ // Also verify that brute-force on the source agrees + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); +} + +#[test] +fn test_coma_to_ilp_bf_vs_ilp() { + let problem = ConsecutiveOnesMatrixAugmentation::new( + vec![vec![true, false, true], vec![false, true, true]], + 1, + ); + let reduction: ReductionCOMAToILP = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_coma_to_ilp_trivial() { + // 1x1 matrix, bound 0 — already consecutive + let problem = ConsecutiveOnesMatrixAugmentation::new(vec![vec![true]], 0); + let reduction: ReductionCOMAToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // x: 1, a+l+u+h+f: 5*1=5 => 6 + assert_eq!(ilp.num_vars, 6); +} diff --git a/src/unit_tests/rules/consecutiveonessubmatrix_ilp.rs b/src/unit_tests/rules/consecutiveonessubmatrix_ilp.rs new file mode 100644 index 00000000..04002099 --- /dev/null +++ b/src/unit_tests/rules/consecutiveonessubmatrix_ilp.rs @@ -0,0 +1,88 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::{ReduceTo, ReductionResult}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_cos_to_ilp_structure() { + // Tucker matrix (3x4), K=3 + let problem = ConsecutiveOnesSubmatrix::new( + vec![ + vec![true, true, false, true], + vec![true, false, true, true], + vec![false, true, true, false], + ], + 3, + ); + let reduction: ReductionCOSToILP = 
ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // s: 4, x: 4*3=12, a+l+u+h+f: 5*3*3=45 => 61 + assert_eq!(ilp.num_vars, 61); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_cos_to_ilp_closed_loop() { + // Tucker matrix (3x4), K=3 + let problem = ConsecutiveOnesSubmatrix::new( + vec![ + vec![true, true, false, true], + vec![true, false, true, true], + vec![false, true, true, false], + ], + 3, + ); + let reduction: ReductionCOSToILP = ReduceTo::>::reduce_to(&problem); + + // Use ILP solver (61 binary vars too large for brute force on target) + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); + + // Verify brute-force on source agrees + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); +} + +#[test] +fn test_cos_to_ilp_bf_vs_ilp() { + let problem = ConsecutiveOnesSubmatrix::new( + vec![ + vec![true, true, false, true], + vec![true, false, true, true], + vec![false, true, true, false], + ], + 3, + ); + let reduction: ReductionCOSToILP = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_cos_to_ilp_trivial() { + // 2x2 identity, K=2 + let problem = ConsecutiveOnesSubmatrix::new(vec![vec![true, false], vec![false, true]], 2); + let reduction: ReductionCOSToILP = 
ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/disjointconnectingpaths_ilp.rs b/src/unit_tests/rules/disjointconnectingpaths_ilp.rs new file mode 100644 index 00000000..645e0797 --- /dev/null +++ b/src/unit_tests/rules/disjointconnectingpaths_ilp.rs @@ -0,0 +1,22 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::rules::ReduceTo; +use crate::topology::SimpleGraph; + +#[test] +fn test_disjointconnectingpaths_to_ilp_closed_loop() { + // 6 vertices, two vertex-disjoint paths available: + // Path (0,2): 0 - 1 - 2 (interior vertex 1, not a terminal) + // Path (3,5): 3 - 4 - 5 (interior vertex 4, not a terminal) + let source = DisjointConnectingPaths::new( + SimpleGraph::new(6, vec![(0, 1), (1, 2), (3, 4), (4, 5)]), + vec![(0, 2), (3, 5)], + ); + let reduction = ReduceTo::>::reduce_to(&source); + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "DisjointConnectingPaths->ILP closed loop", + ); +} diff --git a/src/unit_tests/rules/flowshopscheduling_ilp.rs b/src/unit_tests/rules/flowshopscheduling_ilp.rs new file mode 100644 index 00000000..23195381 --- /dev/null +++ b/src/unit_tests/rules/flowshopscheduling_ilp.rs @@ -0,0 +1,67 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_flowshopscheduling_to_ilp_closed_loop() { + // 2 machines, 3 jobs, deadline 10 + let problem = FlowShopScheduling::new(2, vec![vec![2, 3], vec![3, 2], vec![1, 4]], 10); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let 
bf_witness = bf + .find_witness(&problem) + .expect("feasible instance should have a witness"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!( + problem.evaluate(&extracted), + Or(true), + "ILP extracted solution should be a valid schedule" + ); +} + +#[test] +fn test_flowshopscheduling_to_ilp_infeasible() { + // 2 machines, 3 jobs with large processing times, very tight deadline + let problem = FlowShopScheduling::new(2, vec![vec![5, 5], vec![5, 5], vec![5, 5]], 6); + let reduction = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible FSS should produce infeasible ILP" + ); +} + +#[test] +fn test_flowshopscheduling_to_ilp_single_job() { + // 2 machines, 1 job, deadline 10 + let problem = FlowShopScheduling::new(2, vec![vec![3, 4]], 10); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("single-job ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_flowshopscheduling_to_ilp_bf_vs_ilp() { + let problem = FlowShopScheduling::new(2, vec![vec![2, 3], vec![3, 2], vec![1, 4]], 10); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/hamiltonianpath_ilp.rs 
b/src/unit_tests/rules/hamiltonianpath_ilp.rs new file mode 100644 index 00000000..bc25df0e --- /dev/null +++ b/src/unit_tests/rules/hamiltonianpath_ilp.rs @@ -0,0 +1,88 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Path P3: 0-1-2 + let problem = HamiltonianPath::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + let reduction: ReductionHamiltonianPathToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // n=3, m=2, n_pos=2 + // num_x = 9, num_z = 2*2*2 = 8, total = 17 + assert_eq!(ilp.num_vars, 17); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_hamiltonianpath_to_ilp_closed_loop() { + // Path graph: 0-1-2-3 (has Hamiltonian path) + let problem = HamiltonianPath::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + // BruteForce on source to verify feasibility + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + assert_eq!(problem.evaluate(&bf_solution), Or(true)); + + // Solve via ILP + let reduction: ReductionHamiltonianPathToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!( + problem.evaluate(&extracted), + Or(true), + "ILP solution should satisfy the HamiltonianPath constraint" + ); +} + +#[test] +fn test_hamiltonianpath_to_ilp_cycle_graph() { + // C4: 0-1-2-3-0 (has multiple Hamiltonian paths) + let problem = HamiltonianPath::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0)])); + // BruteForce on source + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + 
assert_eq!(problem.evaluate(&bf_solution), Or(true)); + + // Solve via ILP + let reduction: ReductionHamiltonianPathToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_hamiltonianpath_to_ilp_no_path() { + // Disconnected graph: no Hamiltonian path + let problem = HamiltonianPath::new(SimpleGraph::new(4, vec![(0, 1), (2, 3)])); + let reduction: ReductionHamiltonianPathToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(reduction.target_problem()); + assert!( + result.is_none(), + "Disconnected graph should have no Hamiltonian path" + ); +} + +#[test] +fn test_solution_extraction() { + let problem = HamiltonianPath::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + let reduction: ReductionHamiltonianPathToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/ilp_helpers.rs b/src/unit_tests/rules/ilp_helpers.rs index 0036098c..40eda271 100644 --- a/src/unit_tests/rules/ilp_helpers.rs +++ b/src/unit_tests/rules/ilp_helpers.rs @@ -1,5 +1,5 @@ use super::*; -use crate::models::algebraic::{Comparison, LinearConstraint}; +use crate::models::algebraic::Comparison; #[test] fn test_mccormick_product_constraints() { @@ -123,9 +123,9 @@ fn test_one_hot_decode_permutation() { // 3x3 assignment: item 0 at slot 2, item 1 at slot 0, item 2 at slot 1 // Layout: x_{v*3+p} let mut solution = vec![0usize; 9]; - solution[0 * 3 + 2] = 1; // item 0 -> slot 2 - solution[1 * 3 + 0] = 1; // item 1 
-> slot 0 - solution[2 * 3 + 1] = 1; // item 2 -> slot 1 + solution[2] = 1; // item 0 -> slot 2 + solution[3] = 1; // item 1 -> slot 0 + solution[7] = 1; // item 2 -> slot 1 let decoded = one_hot_decode(&solution, 3, 3, 0); assert_eq!(decoded, vec![1, 2, 0]); // slot 0 gets item 1, slot 1 gets item 2, slot 2 gets item 0 } @@ -134,9 +134,9 @@ fn test_one_hot_decode_permutation() { fn test_one_hot_decode_with_offset() { // Same as above but with offset=5 let mut solution = vec![0usize; 14]; - solution[5 + 0 * 3 + 2] = 1; - solution[5 + 1 * 3 + 0] = 1; - solution[5 + 2 * 3 + 1] = 1; + solution[7] = 1; // 5 + 2 + solution[8] = 1; // 5 + 3 + solution[12] = 1; // 5 + 7 let decoded = one_hot_decode(&solution, 3, 3, 5); assert_eq!(decoded, vec![1, 2, 0]); } diff --git a/src/unit_tests/rules/integralflowhomologousarcs_ilp.rs b/src/unit_tests/rules/integralflowhomologousarcs_ilp.rs new file mode 100644 index 00000000..2975222a --- /dev/null +++ b/src/unit_tests/rules/integralflowhomologousarcs_ilp.rs @@ -0,0 +1,32 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; + +#[test] +fn test_integralflowhomologousarcs_to_ilp_closed_loop() { + // 4 vertices, arcs (0,1),(0,2),(1,3),(2,3), caps all 2, req 2, pair (0,1) + let source = IntegralFlowHomologousArcs::new( + DirectedGraph::new(4, vec![(0, 1), (0, 2), (1, 3), (2, 3)]), + vec![2, 2, 2, 2], + 0, + 3, + 2, + vec![(0, 1)], + ); + // Verify source is satisfiable via brute force + let direct = BruteForce::new() + .find_witness(&source) + .expect("source instance should be satisfiable"); + assert!(source.evaluate(&direct)); + + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(source.evaluate(&extracted)); +} 
diff --git a/src/unit_tests/rules/integralflowwithmultipliers_ilp.rs b/src/unit_tests/rules/integralflowwithmultipliers_ilp.rs new file mode 100644 index 00000000..82693d36 --- /dev/null +++ b/src/unit_tests/rules/integralflowwithmultipliers_ilp.rs @@ -0,0 +1,31 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; + +#[test] +fn test_integralflowwithmultipliers_to_ilp_closed_loop() { + // 4 vertices, arcs (0,1),(0,2),(1,3),(2,3), multipliers all 1, caps all 2, req 2 + let source = IntegralFlowWithMultipliers::new( + DirectedGraph::new(4, vec![(0, 1), (0, 2), (1, 3), (2, 3)]), + 0, + 3, + vec![1, 1, 1, 1], + vec![2, 2, 2, 2], + 2, + ); + let direct = BruteForce::new() + .find_witness(&source) + .expect("source instance should be satisfiable"); + assert!(source.evaluate(&direct)); + + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(source.evaluate(&extracted)); +} diff --git a/src/unit_tests/rules/isomorphicspanningtree_ilp.rs b/src/unit_tests/rules/isomorphicspanningtree_ilp.rs new file mode 100644 index 00000000..47f8a229 --- /dev/null +++ b/src/unit_tests/rules/isomorphicspanningtree_ilp.rs @@ -0,0 +1,73 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // K3, path tree + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let tree = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = IsomorphicSpanningTree::new(graph, tree); + 
let reduction: ReductionISTToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars(), 9); // 3x3 + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + assert!(ilp.objective.is_empty()); +} + +#[test] +fn test_isomorphicspanningtree_to_ilp_closed_loop() { + let graph = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let tree = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3)]); + let problem = IsomorphicSpanningTree::new(graph, tree); + let reduction: ReductionISTToILP = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_satisfaction_target( + &problem, + &reduction, + "IsomorphicSpanningTree->ILP closed loop", + ); +} + +#[test] +fn test_isomorphicspanningtree_to_ilp_bf_vs_ilp() { + let graph = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (0, 3)]); + let tree = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]); + let problem = IsomorphicSpanningTree::new(graph, tree); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem); + assert!( + bf_witness.is_some(), + "BF should find a satisfying assignment" + ); + + let reduction: ReductionISTToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_solution_extraction() { + // K3 with path tree + let graph = SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]); + let tree = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = IsomorphicSpanningTree::new(graph, tree); + let reduction: ReductionISTToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + 
assert_eq!(extracted.len(), 3); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/lengthboundeddisjointpaths_ilp.rs b/src/unit_tests/rules/lengthboundeddisjointpaths_ilp.rs new file mode 100644 index 00000000..d7dc1f6e --- /dev/null +++ b/src/unit_tests/rules/lengthboundeddisjointpaths_ilp.rs @@ -0,0 +1,23 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::rules::ReduceTo; +use crate::topology::SimpleGraph; + +#[test] +fn test_lengthboundeddisjointpaths_to_ilp_closed_loop() { + // Diamond graph: 4 vertices, s=0, t=3, J=2, K=2 + let source = LengthBoundedDisjointPaths::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (1, 3), (2, 3)]), + 0, + 3, + 2, + 2, + ); + let reduction = ReduceTo::>::reduce_to(&source); + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "LengthBoundedDisjointPaths->ILP closed loop", + ); +} diff --git a/src/unit_tests/rules/longestcircuit_ilp.rs b/src/unit_tests/rules/longestcircuit_ilp.rs new file mode 100644 index 00000000..16784eef --- /dev/null +++ b/src/unit_tests/rules/longestcircuit_ilp.rs @@ -0,0 +1,111 @@ +use super::*; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Triangle with unit lengths, bound 3 + let problem = LongestCircuit::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1, 1, 1], + 3, + ); + let reduction: ReductionLongestCircuitToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // m=3, n=3, commodities=2, flow=2*3*2=12, total=3+3+12=18 + assert_eq!(ilp.num_vars, 18); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn 
test_longestcircuit_to_ilp_closed_loop() { + // Hexagon with varying edge lengths, bound = 17 (all 6 outer edges sum to 17) + let problem = LongestCircuit::new( + SimpleGraph::new( + 6, + vec![ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 5), + (5, 0), + (0, 3), + (1, 4), + (2, 5), + (3, 5), + ], + ), + vec![3, 2, 4, 1, 5, 2, 3, 2, 1, 2], + 17, + ); + // BruteForce on source to verify feasibility + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + assert_eq!(problem.evaluate(&bf_solution), Or(true)); + + // Solve via ILP + let reduction: ReductionLongestCircuitToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!( + problem.evaluate(&extracted), + Or(true), + "ILP solution should satisfy the LongestCircuit bound" + ); +} + +#[test] +fn test_longestcircuit_to_ilp_triangle() { + // Triangle: bound 3, all edges length 1 + let problem = LongestCircuit::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1, 1, 1], + 3, + ); + let reduction: ReductionLongestCircuitToILP = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_satisfaction_target( + &problem, + &reduction, + "LongestCircuit->ILP triangle", + ); +} + +#[test] +fn test_longestcircuit_to_ilp_infeasible() { + // Triangle with bound too high + let problem = LongestCircuit::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1, 1, 1], + 4, // bound 4 > total 3 = infeasible + ); + let reduction: ReductionLongestCircuitToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(reduction.target_problem()); + assert!(result.is_none(), "Bound exceeds max circuit length"); +} + +#[test] +fn test_solution_extraction() { + let problem = 
LongestCircuit::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0), (0, 2), (1, 3)]), + vec![1, 1, 1, 1, 2, 2], + 4, + ); + let reduction: ReductionLongestCircuitToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/minimumcutintoboundedsets_ilp.rs b/src/unit_tests/rules/minimumcutintoboundedsets_ilp.rs new file mode 100644 index 00000000..5a635e73 --- /dev/null +++ b/src/unit_tests/rules/minimumcutintoboundedsets_ilp.rs @@ -0,0 +1,70 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::graph::MinimumCutIntoBoundedSets; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::rules::ReduceTo; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +fn small_instance() -> MinimumCutIntoBoundedSets { + // Path graph 0-1-2-3, unit weights, s=0, t=3, B=3, K=2 + MinimumCutIntoBoundedSets::new( + SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), + vec![1, 1, 1], + 0, + 3, + 3, + 2, + ) +} + +#[test] +fn test_minimumcutintoboundedsets_to_ilp_closed_loop() { + let source = small_instance(); + let reduction: ReductionMinCutBSToILP = ReduceTo::>::reduce_to(&source); + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "MinCutBS -> ILP round trip", + ); +} + +#[test] +fn test_reduction_shape() { + let source = small_instance(); + let reduction: ReductionMinCutBSToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + // 4 vertex vars + 3 edge vars = 7 + assert_eq!(ilp.num_vars, 7); +} + +#[test] +fn test_extract_solution() { + let source = small_instance(); + let reduction: ReductionMinCutBSToILP = ReduceTo::>::reduce_to(&source); + let target_sol = vec![0, 0, 1, 1, 
0, 1, 0]; + let extracted = reduction.extract_solution(&target_sol); + assert_eq!(extracted, vec![0, 0, 1, 1]); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_larger_instance() { + let source = MinimumCutIntoBoundedSets::new( + SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (0, 2), (3, 5)], + ), + vec![1, 2, 1, 2, 1, 2, 1], + 0, + 5, + 4, + 3, + ); + let reduction: ReductionMinCutBSToILP = ReduceTo::>::reduce_to(&source); + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "MinCutBS larger instance", + ); +} diff --git a/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs b/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs new file mode 100644 index 00000000..2beb3ee2 --- /dev/null +++ b/src/unit_tests/rules/minimumtardinesssequencing_ilp.rs @@ -0,0 +1,64 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; + +#[test] +fn test_minimumtardinesssequencing_to_ilp_closed_loop() { + let problem = MinimumTardinessSequencing::new(3, vec![2, 3, 1], vec![(0, 2)]); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "MinimumTardinessSequencing->ILP closed loop", + ); +} + +#[test] +fn test_minimumtardinesssequencing_to_ilp_bf_vs_ilp() { + let problem = MinimumTardinessSequencing::new(4, vec![2, 3, 1, 4], vec![(0, 2)]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_witnesses(&problem); + let bf_value = problem.evaluate(&bf_solutions[0]); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + 
assert_eq!(bf_value, ilp_value); + assert!(ilp_value.is_valid()); +} + +#[test] +fn test_minimumtardinesssequencing_to_ilp_no_precedences() { + let problem = MinimumTardinessSequencing::new(3, vec![1, 2, 3], vec![]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert!(problem.evaluate(&extracted).is_valid()); +} + +#[test] +fn test_minimumtardinesssequencing_to_ilp_all_tight() { + // All deadlines equal 1: only one task can be on time + let problem = MinimumTardinessSequencing::new(3, vec![1, 1, 1], vec![]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let value = problem.evaluate(&extracted); + assert!(value.is_valid()); + // At most 2 tardy tasks (only first task is on time if d=1) + assert_eq!(value.0, Some(2)); +} diff --git a/src/unit_tests/rules/mixedchinesepostman_ilp.rs b/src/unit_tests/rules/mixedchinesepostman_ilp.rs new file mode 100644 index 00000000..1375e157 --- /dev/null +++ b/src/unit_tests/rules/mixedchinesepostman_ilp.rs @@ -0,0 +1,29 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::MixedGraph; +use crate::traits::Problem; + +#[test] +fn test_mixedchinesepostman_to_ilp_closed_loop() { + // 3 vertices, 1 directed arc, 2 undirected edges, bound 4 + let source = MixedChinesePostman::new( + MixedGraph::new(3, vec![(0, 1)], vec![(1, 2), (2, 0)]), + vec![1], + vec![1, 1], + 4, + ); + let direct = BruteForce::new() + .find_witness(&source) + .expect("source instance should be satisfiable"); + assert!(source.evaluate(&direct)); + + let reduction = ReduceTo::>::reduce_to(&source); + let 
ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(source.evaluate(&extracted)); +} diff --git a/src/unit_tests/rules/optimallineararrangement_ilp.rs b/src/unit_tests/rules/optimallineararrangement_ilp.rs new file mode 100644 index 00000000..8c9b1291 --- /dev/null +++ b/src/unit_tests/rules/optimallineararrangement_ilp.rs @@ -0,0 +1,97 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Path P4: 0-1-2-3, bound 3 + let problem = + OptimalLinearArrangement::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), 3); + let reduction: ReductionOLAToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // num_x=16, p_v=4, z_e=3, total=23 + assert_eq!(ilp.num_vars, 23); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_optimallineararrangement_to_ilp_closed_loop() { + // Path graph with bound = 3 (identity permutation achieves cost 3) + let problem = + OptimalLinearArrangement::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), 3); + // BruteForce on source to verify feasibility + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + assert_eq!(problem.evaluate(&bf_solution), Or(true)); + + // Solve via ILP + let reduction: ReductionOLAToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!( + problem.evaluate(&extracted), + Or(true), + "ILP solution should satisfy the OLA bound" + ); +} + +#[test] +fn test_optimallineararrangement_to_ilp_with_chords() { + // 6 
vertices, path + chords, bound 11 + let problem = OptimalLinearArrangement::new( + SimpleGraph::new( + 6, + vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (0, 3), (2, 5)], + ), + 11, + ); + + // BruteForce on source + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + assert_eq!(problem.evaluate(&bf_solution), Or(true)); + + // Solve via ILP + let reduction: ReductionOLAToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_optimallineararrangement_to_ilp_infeasible() { + // K4: minimum OLA cost is 10, bound 5 should be infeasible + let problem = OptimalLinearArrangement::new( + SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]), + 5, + ); + let reduction: ReductionOLAToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let result = ilp_solver.solve(reduction.target_problem()); + assert!(result.is_none(), "K4 with bound 5 should be infeasible"); +} + +#[test] +fn test_solution_extraction() { + let problem = + OptimalLinearArrangement::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)]), 3); + let reduction: ReductionOLAToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/paintshop_ilp.rs b/src/unit_tests/rules/paintshop_ilp.rs new file mode 100644 index 00000000..b728e0d6 --- /dev/null +++ b/src/unit_tests/rules/paintshop_ilp.rs @@ -0,0 +1,63 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, 
ILP}; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Sequence: A, B, A, B => 2 cars, 4 positions + let problem = PaintShop::new(vec!["A", "B", "A", "B"]); + let reduction: ReductionPaintShopToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // 2 car vars + 4 k vars + 4 c vars = 10 + assert_eq!(ilp.num_vars(), 10); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_paintshop_to_ilp_closed_loop() { + let problem = PaintShop::new(vec!["A", "B", "A", "C", "B", "C"]); + let reduction: ReductionPaintShopToILP = ReduceTo::>::reduce_to(&problem); + + assert_optimization_round_trip_from_optimization_target( + &problem, + &reduction, + "PaintShop->ILP closed loop", + ); +} + +#[test] +fn test_paintshop_to_ilp_bf_vs_ilp() { + let problem = PaintShop::new(vec!["A", "B", "A", "C", "B", "C"]); + let reduction: ReductionPaintShopToILP = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_witnesses(&problem); + let bf_value = problem.evaluate(&bf_solutions[0]); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert_eq!(bf_value, ilp_value); +} + +#[test] +fn test_solution_extraction() { + // Minimal: A, A => 1 car + let problem = PaintShop::new(vec!["A", "A"]); + let reduction: ReductionPaintShopToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted.len(), 1); + // Either 0 or 1 is valid; coloring is [x, 
1-x], switches = 1 + assert!(problem.evaluate(&extracted).is_valid()); +} diff --git a/src/unit_tests/rules/pathconstrainednetworkflow_ilp.rs b/src/unit_tests/rules/pathconstrainednetworkflow_ilp.rs new file mode 100644 index 00000000..98757b34 --- /dev/null +++ b/src/unit_tests/rules/pathconstrainednetworkflow_ilp.rs @@ -0,0 +1,31 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; + +#[test] +fn test_pathconstrainednetworkflow_to_ilp_closed_loop() { + // 3 vertices, arcs (0,1),(1,2),(0,2), caps all 1, 2 paths, req 2 + let source = PathConstrainedNetworkFlow::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1, 1, 1], + 0, + 2, + vec![vec![0, 1], vec![2]], + 2, + ); + let direct = BruteForce::new() + .find_witness(&source) + .expect("source instance should be satisfiable"); + assert!(source.evaluate(&direct)); + + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(source.evaluate(&extracted)); +} diff --git a/src/unit_tests/rules/quadraticassignment_ilp.rs b/src/unit_tests/rules/quadraticassignment_ilp.rs new file mode 100644 index 00000000..fd0bade7 --- /dev/null +++ b/src/unit_tests/rules/quadraticassignment_ilp.rs @@ -0,0 +1,107 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; + +fn small_qap() -> QuadraticAssignment { + QuadraticAssignment::new( + vec![vec![0, 5, 2], vec![5, 0, 3], vec![2, 3, 0]], + vec![vec![0, 4, 1], vec![4, 0, 3], vec![1, 3, 0]], + ) +} + +#[test] +fn test_reduction_creates_valid_ilp() { + let problem = small_qap(); + let reduction: ReductionQAPToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // n=3, m=3: num_x=9, z pairs: 
3*2*3*3=54, total=63 + assert_eq!(ilp.num_vars, 63); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_quadraticassignment_to_ilp_closed_loop() { + let problem = small_qap(); + // BruteForce on source to get optimal value + let bf = BruteForce::new(); + let bf_solution = bf.find_witness(&problem).expect("brute-force optimum"); + let bf_value = problem.evaluate(&bf_solution); + + // Solve via ILP + let reduction: ReductionQAPToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert!( + ilp_value.is_valid(), + "Extracted solution should be a valid assignment" + ); + assert_eq!( + ilp_value, bf_value, + "ILP and brute-force should agree on optimal value" + ); +} + +#[test] +fn test_quadraticassignment_to_ilp_2x2() { + let problem = + QuadraticAssignment::new(vec![vec![0, 1], vec![1, 0]], vec![vec![0, 2], vec![2, 0]]); + // BruteForce on source + let bf = BruteForce::new(); + let bf_solution = bf.find_witness(&problem).expect("brute-force optimum"); + let bf_value = problem.evaluate(&bf_solution); + + // Solve via ILP + let reduction: ReductionQAPToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert!(ilp_value.is_valid()); + assert_eq!(ilp_value, bf_value); +} + +#[test] +fn test_solution_extraction() { + let problem = small_qap(); + let reduction: ReductionQAPToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = 
reduction.extract_solution(&ilp_solution); + let metric = problem.evaluate(&extracted); + assert!(metric.is_valid()); +} + +#[test] +fn test_quadraticassignment_to_ilp_rectangular() { + // 2 facilities, 3 locations (more locations than facilities) + let problem = QuadraticAssignment::new( + vec![vec![0, 3], vec![3, 0]], + vec![vec![0, 1, 5], vec![1, 0, 2], vec![5, 2, 0]], + ); + // BruteForce on source + let bf = BruteForce::new(); + let bf_solution = bf.find_witness(&problem).expect("brute-force optimum"); + let bf_value = problem.evaluate(&bf_solution); + + // Solve via ILP + let reduction: ReductionQAPToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + + assert!(ilp_value.is_valid()); + assert_eq!(ilp_value, bf_value); +} diff --git a/src/unit_tests/rules/resourceconstrainedscheduling_ilp.rs b/src/unit_tests/rules/resourceconstrainedscheduling_ilp.rs new file mode 100644 index 00000000..f2949771 --- /dev/null +++ b/src/unit_tests/rules/resourceconstrainedscheduling_ilp.rs @@ -0,0 +1,71 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_resourceconstrainedscheduling_to_ilp_closed_loop() { + let problem = ResourceConstrainedScheduling::new( + 3, + vec![20], + vec![vec![6], vec![7], vec![7], vec![6], vec![8], vec![6]], + 2, + ); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "ResourceConstrainedScheduling->ILP closed loop", + ); +} + +#[test] +fn test_resourceconstrainedscheduling_to_ilp_bf_vs_ilp() { + let problem = 
ResourceConstrainedScheduling::new( + 3, + vec![20], + vec![vec![6], vec![7], vec![7], vec![6], vec![8], vec![6]], + 2, + ); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_witness = BruteForce::new() + .find_witness(&problem) + .expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_resourceconstrainedscheduling_to_ilp_infeasible() { + // 3 tasks, 1 processor, 1 resource with bound 5, deadline 1 + // Each task requires 6 resource units — can't fit any two in same slot + let problem = + ResourceConstrainedScheduling::new(1, vec![5], vec![vec![6], vec![6], vec![6]], 1); + let reduction = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible RCS should produce infeasible ILP" + ); +} + +#[test] +fn test_resourceconstrainedscheduling_to_ilp_structure() { + let problem = + ResourceConstrainedScheduling::new(2, vec![10], vec![vec![3], vec![4], vec![5]], 2); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=3 tasks, D=2 deadline → 6 variables + assert_eq!(ilp.num_vars, 6); + // 3 one-hot + 2 capacity + 1*2 resource = 7 + assert_eq!(ilp.constraints.len(), 7); +} diff --git a/src/unit_tests/rules/rootedtreestorageassignment_ilp.rs b/src/unit_tests/rules/rootedtreestorageassignment_ilp.rs new file mode 100644 index 00000000..fcd3e7ec --- /dev/null +++ b/src/unit_tests/rules/rootedtreestorageassignment_ilp.rs @@ -0,0 +1,97 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + let problem = 
RootedTreeStorageAssignment::new(3, vec![vec![0, 1], vec![1, 2]], 1); + let reduction: ReductionRTSAToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=3, r=2 (both subsets have size 2) + let n = 3; + let r = 2; + let expected = n * n * n + 2 * n * n + n + r * (n * n + 2 * n + 3); + assert_eq!(ilp.num_vars(), expected); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_rootedtreestorageassignment_to_ilp_bf_vs_ilp() { + let problem = RootedTreeStorageAssignment::new(3, vec![vec![0, 1], vec![1, 2]], 1); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem); + let bf_value = bf_witness + .as_ref() + .map(|w| problem.evaluate(w)) + .unwrap_or(Or(false)); + + let reduction: ReductionRTSAToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_result = ilp_solver.solve(reduction.target_problem()); + + match ilp_result { + Some(ilp_solution) => { + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + assert!(ilp_value.0, "ILP solution should be feasible"); + assert!(bf_value.0, "BF should also find feasible solution"); + } + None => { + assert!(!bf_value.0, "both should agree on infeasibility"); + } + } +} + +#[test] +fn test_rootedtreestorageassignment_to_ilp_infeasible() { + // 3 elements, subsets {0,1},{1,2},{0,2} with bound 0: + // All 3 subsets must have extension cost 0 => all pairs are ancestor chains. + // But {0,1},{1,2},{0,2} can't all be chains with cost 0 in a rooted tree + // unless all 3 elements are on one path (chain 0-1-2), which gives cost 0 for all. + // Actually that is feasible: root=0, parent(1)=0, parent(2)=1, depth 0,1,2. + // Let's make it truly infeasible with a strict bound: + // 4 elements, subsets {0,1},{2,3},{0,2},{1,3} bound 0. + // This requires all to be on chains of cost 0 (perfect paths), which is impossible + // for crossing pairs. 
+ let problem = RootedTreeStorageAssignment::new( + 4, + vec![vec![0, 1], vec![2, 3], vec![0, 2], vec![1, 3]], + 0, + ); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem); + + let reduction: ReductionRTSAToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_result = ilp_solver.solve(reduction.target_problem()); + + match ilp_result { + Some(ilp_solution) => { + let extracted = reduction.extract_solution(&ilp_solution); + let ilp_value = problem.evaluate(&extracted); + assert!(ilp_value.0, "ILP solution should be feasible"); + assert!(bf_witness.is_some(), "BF should also find a solution"); + } + None => { + assert!(bf_witness.is_none(), "both should agree on infeasibility"); + } + } +} + +#[test] +fn test_solution_extraction() { + let problem = RootedTreeStorageAssignment::new(3, vec![vec![0, 1, 2]], 0); + let reduction: ReductionRTSAToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted.len(), 3); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/ruralpostman_ilp.rs b/src/unit_tests/rules/ruralpostman_ilp.rs new file mode 100644 index 00000000..79a25aa5 --- /dev/null +++ b/src/unit_tests/rules/ruralpostman_ilp.rs @@ -0,0 +1,29 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; + +#[test] +fn test_ruralpostman_to_ilp_closed_loop() { + // Triangle: 3 vertices, 3 edges, require edge 0, bound 3 + let source = RuralPostman::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2), (0, 2)]), + vec![1, 1, 1], + vec![0], + 3, + ); + let direct = BruteForce::new() + .find_witness(&source) + .expect("source instance should be satisfiable"); + 
assert!(source.evaluate(&direct)); + + let reduction = ReduceTo::>::reduce_to(&source); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be feasible"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert!(source.evaluate(&extracted)); +} diff --git a/src/unit_tests/rules/sequencingtominimizemaximumcumulativecost_ilp.rs b/src/unit_tests/rules/sequencingtominimizemaximumcumulativecost_ilp.rs new file mode 100644 index 00000000..6e2d592c --- /dev/null +++ b/src/unit_tests/rules/sequencingtominimizemaximumcumulativecost_ilp.rs @@ -0,0 +1,59 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_sequencingtominimizemaximumcumulativecost_to_ilp_closed_loop() { + let problem = + SequencingToMinimizeMaximumCumulativeCost::new(vec![2, -1, 3, -2], vec![(0, 2)], 4); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "SequencingToMinimizeMaximumCumulativeCost->ILP closed loop", + ); +} + +#[test] +fn test_sequencingtominimizemaximumcumulativecost_to_ilp_bf_vs_ilp() { + let problem = + SequencingToMinimizeMaximumCumulativeCost::new(vec![2, -1, 3, -2], vec![(0, 2)], 4); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_witness = BruteForce::new() + .find_witness(&problem) + .expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_sequencingtominimizemaximumcumulativecost_to_ilp_infeasible() { + // Costs all positive, bound 0, 
impossible if any task has positive cost + let problem = SequencingToMinimizeMaximumCumulativeCost::new(vec![1, 2, 3], vec![], 0); + let reduction = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible STMMCC should produce infeasible ILP" + ); +} + +#[test] +fn test_sequencingtominimizemaximumcumulativecost_to_ilp_no_precedences() { + let problem = SequencingToMinimizeMaximumCumulativeCost::new(vec![3, -2, 1], vec![], 3); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/sequencingtominimizeweightedtardiness_ilp.rs b/src/unit_tests/rules/sequencingtominimizeweightedtardiness_ilp.rs new file mode 100644 index 00000000..1d68ac78 --- /dev/null +++ b/src/unit_tests/rules/sequencingtominimizeweightedtardiness_ilp.rs @@ -0,0 +1,66 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_sequencingtominimizeweightedtardiness_to_ilp_closed_loop() { + let problem = + SequencingToMinimizeWeightedTardiness::new(vec![3, 4, 2], vec![2, 3, 1], vec![5, 8, 4], 10); + let reduction = ReduceTo::>::reduce_to(&problem); + + // Use ILPSolver directly (BruteForce cannot enumerate ILP) + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_sequencingtominimizeweightedtardiness_to_ilp_bf_vs_ilp() { + let problem = + SequencingToMinimizeWeightedTardiness::new(vec![3, 4, 2], vec![2, 3, 1], vec![5, 8, 4], 10); + let reduction = 
ReduceTo::>::reduce_to(&problem); + + let bf_witness = BruteForce::new() + .find_witness(&problem) + .expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_sequencingtominimizeweightedtardiness_to_ilp_infeasible() { + // All jobs have length 10, deadline 1, weight 1, bound 0: impossible + let problem = + SequencingToMinimizeWeightedTardiness::new(vec![10, 10], vec![1, 1], vec![1, 1], 0); + let reduction = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible STMWT should produce infeasible ILP" + ); +} + +#[test] +fn test_sequencingtominimizeweightedtardiness_to_ilp_no_tardiness() { + // Large deadlines: no job is tardy + let problem = SequencingToMinimizeWeightedTardiness::new( + vec![1, 1, 1], + vec![1, 1, 1], + vec![10, 10, 10], + 0, + ); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/sequencingwithreleasetimesanddeadlines_ilp.rs b/src/unit_tests/rules/sequencingwithreleasetimesanddeadlines_ilp.rs new file mode 100644 index 00000000..4ed4daca --- /dev/null +++ b/src/unit_tests/rules/sequencingwithreleasetimesanddeadlines_ilp.rs @@ -0,0 +1,59 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn 
test_sequencingwithreleasetimesanddeadlines_to_ilp_closed_loop() { + let problem = + SequencingWithReleaseTimesAndDeadlines::new(vec![1, 2, 1], vec![0, 0, 2], vec![3, 3, 4]); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "SequencingWithReleaseTimesAndDeadlines->ILP closed loop", + ); +} + +#[test] +fn test_sequencingwithreleasetimesanddeadlines_to_ilp_bf_vs_ilp() { + let problem = + SequencingWithReleaseTimesAndDeadlines::new(vec![1, 2, 1], vec![0, 0, 2], vec![3, 3, 4]); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_witness = BruteForce::new() + .find_witness(&problem) + .expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_sequencingwithreleasetimesanddeadlines_to_ilp_infeasible() { + // Two tasks that can't both fit: both need time 0-1, but overlap + let problem = SequencingWithReleaseTimesAndDeadlines::new(vec![2, 2], vec![0, 0], vec![2, 2]); + let reduction = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible SWRTD should produce infeasible ILP" + ); +} + +#[test] +fn test_sequencingwithreleasetimesanddeadlines_to_ilp_single_task() { + let problem = SequencingWithReleaseTimesAndDeadlines::new(vec![3], vec![1], vec![5]); + let reduction = ReduceTo::>::reduce_to(&problem); + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("single-task ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/shortestcommonsupersequence_ilp.rs 
b/src/unit_tests/rules/shortestcommonsupersequence_ilp.rs new file mode 100644 index 00000000..c7eabbc8 --- /dev/null +++ b/src/unit_tests/rules/shortestcommonsupersequence_ilp.rs @@ -0,0 +1,61 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Alphabet {0,1}, strings [0,1] and [1,0], bound 3 + let problem = ShortestCommonSupersequence::new(2, vec![vec![0, 1], vec![1, 0]], 3); + let reduction: ReductionSCSToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // x vars: 3*2 = 6, m vars: 4*3 = 12, total = 18 + assert_eq!(ilp.num_vars(), 18); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + assert!(ilp.objective.is_empty()); +} + +#[test] +fn test_shortestcommonsupersequence_to_ilp_closed_loop() { + let problem = ShortestCommonSupersequence::new(2, vec![vec![0, 1], vec![1, 0]], 3); + let reduction: ReductionSCSToILP = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_satisfaction_target( + &problem, + &reduction, + "SCS->ILP closed loop", + ); +} + +#[test] +fn test_shortestcommonsupersequence_to_ilp_bf_vs_ilp() { + let problem = ShortestCommonSupersequence::new(3, vec![vec![0, 1, 2], vec![2, 1, 0]], 5); + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem); + assert!(bf_witness.is_some()); + + let reduction: ReductionSCSToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_solution_extraction() { + // Single string [0,1], bound 2 over alphabet {0,1} + let problem 
= ShortestCommonSupersequence::new(2, vec![vec![0, 1]], 2); + let reduction: ReductionSCSToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted.len(), 2); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/sparsematrixcompression_ilp.rs b/src/unit_tests/rules/sparsematrixcompression_ilp.rs new file mode 100644 index 00000000..d92744b4 --- /dev/null +++ b/src/unit_tests/rules/sparsematrixcompression_ilp.rs @@ -0,0 +1,79 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::rules::{ReduceTo, ReductionResult}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_smc_to_ilp_structure() { + let problem = SparseMatrixCompression::new( + vec![ + vec![true, false, false, true], + vec![false, true, false, false], + vec![false, false, true, false], + vec![true, false, false, false], + ], + 2, + ); + let reduction: ReductionSMCToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // x: 4*2 = 8 + assert_eq!(ilp.num_vars, 8); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_smc_to_ilp_closed_loop() { + let problem = SparseMatrixCompression::new( + vec![ + vec![true, false, false, true], + vec![false, true, false, false], + vec![false, false, true, false], + vec![true, false, false, false], + ], + 2, + ); + let reduction: ReductionSMCToILP = ReduceTo::>::reduce_to(&problem); + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "SparseMatrixCompression->ILP closed loop", + ); +} + +#[test] +fn test_smc_to_ilp_bf_vs_ilp() { + let problem = SparseMatrixCompression::new( + vec![ 
+ vec![true, false, false, true], + vec![false, true, false, false], + vec![false, false, true, false], + vec![true, false, false, false], + ], + 2, + ); + let reduction: ReductionSMCToILP = ReduceTo::>::reduce_to(&problem); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem).expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_smc_to_ilp_trivial() { + // Single row, K=1 + let problem = SparseMatrixCompression::new(vec![vec![true, false]], 1); + let reduction: ReductionSMCToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // x: 1*1 = 1 + assert_eq!(ilp.num_vars, 1); +} diff --git a/src/unit_tests/rules/stackercrane_ilp.rs b/src/unit_tests/rules/stackercrane_ilp.rs new file mode 100644 index 00000000..776d4a3c --- /dev/null +++ b/src/unit_tests/rules/stackercrane_ilp.rs @@ -0,0 +1,23 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::rules::ReduceTo; + +#[test] +fn test_stackercrane_to_ilp_closed_loop() { + // 3 vertices, 2 required arcs, 1 connector edge + let source = StackerCrane::new( + 3, + vec![(0, 1), (2, 0)], + vec![(1, 2)], + vec![1, 1], + vec![1], + 4, + ); + let reduction = ReduceTo::>::reduce_to(&source); + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "StackerCrane->ILP closed loop", + ); +} diff --git a/src/unit_tests/rules/steinertreeingraphs_ilp.rs b/src/unit_tests/rules/steinertreeingraphs_ilp.rs new file mode 100644 index 00000000..7f07c540 --- /dev/null +++ b/src/unit_tests/rules/steinertreeingraphs_ilp.rs @@ -0,0 +1,23 @@ +use super::*; 
+use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::rules::ReduceTo; +use crate::topology::SimpleGraph; + +#[test] +fn test_steinertreeingraphs_to_ilp_closed_loop() { + // Path graph: 0 - 1 - 2, terminals {0, 2}, weights [1, 1] + // Optimal Steiner tree: use both edges (cost 2) + // ILP variables: 2 + 2*2*1 = 6 binary = 64 configs + let source = SteinerTreeInGraphs::new( + SimpleGraph::new(3, vec![(0, 1), (1, 2)]), + vec![0, 2], + vec![1, 1], + ); + let reduction = ReduceTo::>::reduce_to(&source); + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "SteinerTreeInGraphs->ILP closed loop", + ); +} diff --git a/src/unit_tests/rules/stringtostringcorrection_ilp.rs b/src/unit_tests/rules/stringtostringcorrection_ilp.rs new file mode 100644 index 00000000..823b3871 --- /dev/null +++ b/src/unit_tests/rules/stringtostringcorrection_ilp.rs @@ -0,0 +1,79 @@ +use super::*; +use crate::models::algebraic::{ObjectiveSense, ILP}; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // source = [0,1], target = [1], bound = 1 (delete position 0) + let problem = StringToStringCorrection::new(2, vec![0, 1], vec![1], 1); + let reduction: ReductionSTSCToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + // n=2, K=1: (K+1)*n*n + (K+1)*n + K*n + K*(n-1) + K = 2*4 + 2*2 + 1*2 + 1*1 + 1 = 16 + assert_eq!(ilp.num_vars(), 16); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_stringtostringcorrection_to_ilp_bf_vs_ilp() { + // source=[0,1], target=[1], bound=1 (delete position 0) + let problem = StringToStringCorrection::new(2, vec![0, 1], vec![1], 1); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem); + assert!(bf_witness.is_some(), "BF should find a solution"); + + let reduction: 
ReductionSTSCToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_solution_extraction_delete() { + // source=[0,1], target=[1], bound=1 => delete at position 0 + let problem = StringToStringCorrection::new(2, vec![0, 1], vec![1], 1); + let reduction: ReductionSTSCToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(extracted.len(), 1); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_stringtostringcorrection_to_ilp_infeasible() { + // source=[0], target=[0,1]: m > n, so model rejects before any search + // The ILP is trivially infeasible (0 vars, unsatisfiable constraint) + let problem = StringToStringCorrection::new(2, vec![0], vec![0, 1], 1); + + // Verify the source problem is actually infeasible + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem); + assert!(bf_witness.is_none(), "source should be infeasible"); +} + +#[test] +fn test_stringtostringcorrection_to_ilp_swap() { + // source=[1,0], target=[0,1], bound=1 => swap at position 0 + let problem = StringToStringCorrection::new(2, vec![1, 0], vec![0, 1], 1); + + let bf = BruteForce::new(); + let bf_witness = bf.find_witness(&problem); + assert!(bf_witness.is_some(), "BF should find a solution"); + + let reduction: ReductionSTSCToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + 
assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/strongconnectivityaugmentation_ilp.rs b/src/unit_tests/rules/strongconnectivityaugmentation_ilp.rs new file mode 100644 index 00000000..f7f819ff --- /dev/null +++ b/src/unit_tests/rules/strongconnectivityaugmentation_ilp.rs @@ -0,0 +1,75 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::models::graph::StrongConnectivityAugmentation; +use crate::rules::ReduceTo; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::DirectedGraph; +use crate::traits::Problem; + +fn small_instance() -> StrongConnectivityAugmentation { + // Path 0->1->2, candidates: (2,0,1),(1,0,2), bound=2 + StrongConnectivityAugmentation::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2)]), + vec![(2, 0, 1), (1, 0, 2)], + 2, + ) +} + +#[test] +fn test_strongconnectivityaugmentation_to_ilp_closed_loop() { + let source = small_instance(); + let reduction: ReductionSCAToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + + // Solve source with brute force + let bf = BruteForce::new(); + let bf_solutions = bf.find_all_witnesses(&source); + assert!(!bf_solutions.is_empty(), "source should be satisfiable"); + + // Solve ILP + let ilp_solver = ILPSolver::new(); + let ilp_sol = ilp_solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + + assert!( + source.evaluate(&extracted).0, + "extracted solution must be valid" + ); +} + +#[test] +fn test_extract_solution() { + let source = small_instance(); + let reduction: ReductionSCAToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + let ilp_sol = solver.solve(ilp).expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + assert_eq!(extracted.len(), 2); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_trivial_single_vertex() { + let source = 
StrongConnectivityAugmentation::new(DirectedGraph::new(1, vec![]), vec![], 0); + let reduction: ReductionSCAToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + let ilp_sol = solver.solve(ilp).expect("trivial should be solvable"); + let extracted = reduction.extract_solution(&ilp_sol); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_infeasible_budget() { + // 3 vertices 0->1->2, only candidate is (2,0,10), budget=5 + let source = StrongConnectivityAugmentation::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2)]), + vec![(2, 0, 10)], + 5, + ); + let reduction: ReductionSCAToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let solver = ILPSolver::new(); + assert!(solver.solve(ilp).is_none()); +} diff --git a/src/unit_tests/rules/subgraphisomorphism_ilp.rs b/src/unit_tests/rules/subgraphisomorphism_ilp.rs new file mode 100644 index 00000000..ffab5916 --- /dev/null +++ b/src/unit_tests/rules/subgraphisomorphism_ilp.rs @@ -0,0 +1,96 @@ +use super::*; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_reduction_creates_valid_ilp() { + // Host: K4, Pattern: K3 + let host = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let pattern = SimpleGraph::new(3, vec![(0, 1), (0, 2), (1, 2)]); + let problem = SubgraphIsomorphism::new(host, pattern); + let reduction: ReductionSubIsoToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + // n_pat=3, n_host=4: num_vars=12 + assert_eq!(ilp.num_vars, 12); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_subgraphisomorphism_to_ilp_closed_loop() { + // Host: K4, Pattern: K3 (always embeddable) + let host = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let pattern = SimpleGraph::new(3, vec![(0, 1), (0, 2), (1, 2)]); + 
let problem = SubgraphIsomorphism::new(host, pattern); + + // BruteForce on source to confirm feasibility + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + assert_eq!(problem.evaluate(&bf_solution), Or(true)); + + // Solve via ILP + let reduction: ReductionSubIsoToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!( + problem.evaluate(&extracted), + Or(true), + "ILP solution should be a valid subgraph isomorphism" + ); +} + +#[test] +fn test_subgraphisomorphism_to_ilp_path_in_cycle() { + // Host: C4, Pattern: P3 (path on 3 vertices) + let host = SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3), (3, 0)]); + let pattern = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let problem = SubgraphIsomorphism::new(host, pattern); + + // BruteForce on source + let bf = BruteForce::new(); + let bf_solution = bf + .find_witness(&problem) + .expect("brute-force should find a solution"); + assert_eq!(problem.evaluate(&bf_solution), Or(true)); + + // Solve via ILP + let reduction: ReductionSubIsoToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_subgraphisomorphism_to_ilp_infeasible() { + // Host: path 0-1-2, Pattern: triangle K3 (not embeddable) + let host = SimpleGraph::new(3, vec![(0, 1), (1, 2)]); + let pattern = SimpleGraph::new(3, vec![(0, 1), (0, 2), (1, 2)]); + let problem = SubgraphIsomorphism::new(host, pattern); + let reduction: ReductionSubIsoToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let 
result = ilp_solver.solve(reduction.target_problem()); + assert!(result.is_none(), "K3 in path should be infeasible"); +} + +#[test] +fn test_solution_extraction() { + let host = SimpleGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]); + let pattern = SimpleGraph::new(3, vec![(0, 1), (0, 2), (1, 2)]); + let problem = SubgraphIsomorphism::new(host, pattern); + let reduction: ReductionSubIsoToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + let ilp_solution = ilp_solver + .solve(reduction.target_problem()) + .expect("solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/timetabledesign_ilp.rs b/src/unit_tests/rules/timetabledesign_ilp.rs new file mode 100644 index 00000000..f4cdd952 --- /dev/null +++ b/src/unit_tests/rules/timetabledesign_ilp.rs @@ -0,0 +1,83 @@ +use super::*; +use crate::models::algebraic::ILP; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::{BruteForce, ILPSolver}; +use crate::traits::Problem; +use crate::types::Or; + +#[test] +fn test_timetabledesign_to_ilp_closed_loop() { + // 2 craftsmen, 2 tasks, 2 periods — all available, requirements: c0-t0=1, c1-t1=1 + let problem = TimetableDesign::new( + 2, + 2, + 2, + vec![vec![true, true], vec![true, true]], + vec![vec![true, true], vec![true, true]], + vec![vec![1, 0], vec![0, 1]], + ); + let reduction = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "TimetableDesign->ILP closed loop", + ); +} + +#[test] +fn test_timetabledesign_to_ilp_bf_vs_ilp() { + let problem = TimetableDesign::new( + 2, + 2, + 2, + vec![vec![true, true], vec![true, true]], + vec![vec![true, true], vec![true, true]], + vec![vec![1, 0], vec![0, 1]], + ); + let reduction = ReduceTo::>::reduce_to(&problem); + + let bf_witness = 
BruteForce::new() + .find_witness(&problem) + .expect("should be feasible"); + assert_eq!(problem.evaluate(&bf_witness), Or(true)); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_timetabledesign_to_ilp_infeasible() { + // Craftsman 0 available only in period 0, but needs 2 periods of work with task 0 + let problem = TimetableDesign::new(1, 1, 1, vec![vec![true]], vec![vec![true]], vec![vec![2]]); + let reduction = ReduceTo::>::reduce_to(&problem); + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "infeasible TD should produce infeasible ILP" + ); +} + +#[test] +fn test_timetabledesign_to_ilp_identity_extraction() { + let problem = TimetableDesign::new( + 2, + 2, + 2, + vec![vec![true, true], vec![true, true]], + vec![vec![true, true], vec![true, true]], + vec![vec![1, 0], vec![0, 1]], + ); + let reduction = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + // Identity extraction: ILP solution == source config + assert_eq!(extracted, ilp_solution); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} From bca16d3da6afffc04577ec4c4a20e41e350cc184 Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 24 Mar 2026 19:31:30 +0800 Subject: [PATCH 4/6] update skills --- .claude/CLAUDE.md | 2 +- .claude/skills/add-model/SKILL.md | 29 ++ .claude/skills/add-rule/SKILL.md | 3 + .claude/skills/check-issue/SKILL.md | 4 +- .claude/skills/issue-to-pr/SKILL.md | 14 +- .claude/skills/propose/SKILL.md | 33 +- .github/ISSUE_TEMPLATE/problem.md | 12 +- .gitignore | 1 + ...26-03-22-generalized-aggregation-design.md | 477 ------------------ 9 files changed, 84 insertions(+), 491 deletions(-) delete 
mode 100644 docs/plans/2026-03-22-generalized-aggregation-design.md diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index f90fd11b..27ce2026 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -7,7 +7,7 @@ Rust library for NP-hard problem reductions. Implements computational problems w These repo-local skills live under `.claude/skills/*/SKILL.md`. - [run-pipeline](skills/run-pipeline/SKILL.md) -- Pick a Ready issue from the GitHub Project board, move it through In Progress -> issue-to-pr -> Review pool. One issue at a time; forever-loop handles iteration. -- [issue-to-pr](skills/issue-to-pr/SKILL.md) -- Convert a GitHub issue into a PR with an implementation plan. One item per PR: `[Rule]` issues require both models to exist on `main`; never bundle model + rule in the same PR. +- [issue-to-pr](skills/issue-to-pr/SKILL.md) -- Convert a GitHub issue into a PR with an implementation plan. Default rule: one item per PR. Exception: a `[Model]` issue that explicitly claims direct ILP solvability should implement the model and its direct ` -> ILP` rule together; `[Rule]` issues still require both models to exist on `main`. - [add-model](skills/add-model/SKILL.md) -- Add a new problem model. Can be used standalone (brainstorms with user) or called from `issue-to-pr`. - [add-rule](skills/add-rule/SKILL.md) -- Add a new reduction rule. Can be used standalone (brainstorms with user) or called from `issue-to-pr`. - [review-structural](skills/review-structural/SKILL.md) -- Project-specific structural completeness check: model/rule checklists, build, semantic correctness, issue compliance. Read-only, no code changes. Called by `review-pipeline`. 
diff --git a/.claude/skills/add-model/SKILL.md b/.claude/skills/add-model/SKILL.md index 960b8b82..0f00e738 100644 --- a/.claude/skills/add-model/SKILL.md +++ b/.claude/skills/add-model/SKILL.md @@ -55,6 +55,11 @@ Before implementation, verify that at least one reduction rule exists or is plan **If associated rules are found:** List them and continue. +**If the issue explicitly claims ILP solvability in "How to solve":** +- One associated rule MUST be a direct `[Rule] to ILP` +- Treat that direct ILP rule as part of the same implementation scope +- Do NOT split the model and its direct ILP rule into separate PRs + ## Reference Implementations Read these first to understand the patterns: @@ -75,6 +80,7 @@ Before implementing, make sure the plan explicitly covers these items that struc - `declare_variants!` is present with exactly one `default` variant when multiple concrete variants exist - CLI discovery and `pred create ` support are included where applicable - A canonical model example is registered for example-db / `pred create --example` +- If the issue explicitly claims direct ILP solving, the plan also includes the direct ` -> ILP` rule with exact overhead metadata, feature-gated registration, strong regression tests, and ILP-enabled verification - `docs/paper/reductions.typ` adds both the display-name dictionary entry and the `problem-def(...)` ## Step 1: Determine the category @@ -193,6 +199,19 @@ This example is now the canonical source for: - paper/example exports via `load-model-example()` in `reductions.typ` - example-db invariants tested in `src/unit_tests/example_db.rs` +## Step 4.7: Implement Direct ILP Rule When Claimed + +If the issue explicitly says the model is solvable by reducing **directly** to ILP, implement `src/rules/_ilp.rs` in the **same PR** as the model. This is the one exception to the normal "one item per PR" policy: the direct ` -> ILP` rule is part of the model feature, not optional follow-up work. 
+ +Completeness bar: +- Feature-gate the rule under `ilp-solver` and register it normally +- Add exact overhead expressions and any required size-field getters; metadata must match the constructed ILP exactly +- Add strong tests in `src/unit_tests/rules/_ilp.rs`: structure/metadata, closed-loop semantics vs the source problem or brute force, extraction, `solve_reduced()` or ILP path coverage when appropriate, and weighted/infeasible/pathological regressions whenever the model semantics admit them +- Update CLI/example-db/paper paths so the claimed ILP solver route is actually usable and documented +- Verify with ILP-enabled workspace commands, not just non-ILP unit tests + +A direct ILP rule shipped with a model issue must match the completeness bar of a standalone production ILP reduction. Do not add a stub just to satisfy the issue text. + ## Step 5: Write unit tests Create `src/unit_tests/models//.rs`: @@ -206,6 +225,8 @@ Every model needs **at least 3 test functions** (the structural reviewer enforce - **Serialization** — round-trip serde (when the model is used in CLI/example-db flows). - **Paper example** — verify the worked example from the paper entry (see below). +If Step 4.7 applies, also add a dedicated ILP rule test file under `src/unit_tests/rules/_ilp.rs`. Use strong direct-to-ILP reductions in the repo as the reference bar: the tests should validate the actual formulation semantics, not just that an ILP file exists. + When you add `test__paper_example`, it should: 1. Construct the same instance shown in the paper's example figure 2. 
Evaluate the solution from the issue's **Expected Outcome** section as shown in the paper and assert it is valid (and optimal for optimization problems) @@ -259,10 +280,17 @@ Checklist: display name registered, notation self-contained, background present, ## Step 7: Verify +For ordinary model-only work: ```bash make test clippy # Must pass ``` +If Step 4.7 applied, run ILP-enabled workspace verification instead: +```bash +cargo clippy --all-targets --features ilp-highs -- -D warnings +cargo test --features "ilp-highs example-db" --workspace --verbose +``` + Structural and quality review is handled by the `review-pipeline` stage, not here. The run stage just needs to produce working code. ## Naming Conventions @@ -292,3 +320,4 @@ Structural and quality review is handled by the `review-pipeline` stage, not her | Schema lists derived fields | Schema should list constructor params, not internal fields (e.g., `matrix, k` not `matrix, m, n, k`) | | Missing canonical model example | Add a builder in `src/example_db/model_builders.rs` and keep it aligned with paper/example workflows | | Paper example not tested | Must include `test__paper_example` that verifies the exact instance, solution, and solution count shown in the paper | +| Claiming direct ILP solving but leaving ` -> ILP` for later | If the issue promises a direct ILP path, implement that rule in the same PR with exact overhead metadata and production-level ILP tests | diff --git a/.claude/skills/add-rule/SKILL.md b/.claude/skills/add-rule/SKILL.md index 9bff1abc..185e9184 100644 --- a/.claude/skills/add-rule/SKILL.md +++ b/.claude/skills/add-rule/SKILL.md @@ -193,6 +193,8 @@ Structural and quality review is handled by the `review-pipeline` stage, not her - If the target problem already has a solver, use it directly. - If the solving strategy requires ILP, implement the ILP reduction rule alongside (feature-gated under `ilp-solver`). +- A direct-to-ILP rule is a production reduction, not a stub. 
Match the completeness bar used by strong ILP reductions in this repo: exact overhead metadata, structure + closed-loop + extraction tests, weighted/infeasible/pathological regressions whenever the semantics require them, and ILP-enabled workspace verification. +- When this rule is the companion to a `[Model]` issue that explicitly claims ILP solvability, it belongs in the same PR as the model. - If a custom solver is needed, implement in `src/solvers/` and document. ## CLI Impact @@ -223,3 +225,4 @@ Aggregate-only reductions currently have a narrower CLI surface: | Not adding canonical example to `example_db` | Add builder in `src/example_db/rule_builders.rs` | | Not regenerating reduction graph | Run `cargo run --example export_graph` after adding a rule | | Source/target model not fully registered | Both problems must already have `declare_variants!`, aliases as needed, and CLI create support -- use `add-model` skill first | +| Treating a direct-to-ILP rule as a toy stub | Direct ILP reductions need exact overhead metadata and strong semantic regression tests, just like other production ILP rules | diff --git a/.claude/skills/check-issue/SKILL.md b/.claude/skills/check-issue/SKILL.md index dcbe8a8a..cfa297ae 100644 --- a/.claude/skills/check-issue/SKILL.md +++ b/.claude/skills/check-issue/SKILL.md @@ -227,6 +227,7 @@ Applies when the title contains `[Model]`. 5. 
Check **How to solve** section: - At least one solver method must be checked (brute-force, ILP reduction, or other) - If no solver path is identified → **Warn** ("No solver means reduction rules can't be verified") + - If direct ILP solving is claimed, the issue must link a direct `[Rule] to ILP` companion issue in the "Reduction Rule Crossref" section; otherwise → **Fail** --- @@ -305,7 +306,7 @@ Check all template sections are present and substantive: | Variables | Count, per-variable domain, semantic meaning | | Schema | Type name, variants, field table | | Complexity | Best known algorithm with citation **and** a concrete complexity expression in terms of problem parameters (e.g., `q^n`, `2^{0.8765n}`) | -| How to solve | At least one solver method checked | +| How to solve | At least one solver method checked; if ILP is claimed, a direct `[Rule] to ILP` issue must be linked | | Example Instance | Concrete instance that exercises the core structure | | Expected Outcome | Satisfaction: one valid / satisfying solution with brief justification. Optimization: one optimal solution with the optimal objective value | @@ -334,6 +335,7 @@ The formal definition must be **precise and implementable**: - Optimization problems must include a concrete optimal solution and the optimal objective value - **Detailed enough for paper**: This example will appear in the paper — it needs to be illustrative - **Round-trip testable**: The example must be complex enough that a round-trip test (construct instance → solve → verify) can catch implementation bugs. A too-simple instance (e.g., 2 vertices, a single clause) may have a trivially correct solution that passes even with a wrong implementation. The example should have multiple feasible configurations with different objective values (for optimization) or a mix of satisfying and non-satisfying configurations (for satisfaction problems), so that correctness is meaningfully tested. 
Rule of thumb: the instance should have at least 2 suboptimal feasible solutions in addition to the optimal one. +- **ILP-testable when claimed**: If the issue advertises a direct ILP path, the example should be rich enough to support strong ILP closed-loop tests rather than a degenerate "any formulation passes" case. ### 4e: Representation Feasibility diff --git a/.claude/skills/issue-to-pr/SKILL.md b/.claude/skills/issue-to-pr/SKILL.md index e22d134f..9962f4b8 100644 --- a/.claude/skills/issue-to-pr/SKILL.md +++ b/.claude/skills/issue-to-pr/SKILL.md @@ -72,7 +72,7 @@ For `[Rule]` issues, `ISSUE_JSON` already includes `source_problem`, `target_pro - If both `checks.source_model` and `checks.target_model` are `pass` → continue to step 4. - If either is `fail` → **STOP**. Comment on the issue: "Blocked: model `` does not exist in main yet. Please implement it first (or file a `[Model]` issue)." -**One item per PR:** Do NOT implement a missing model as part of a `[Rule]` PR. Each PR should contain exactly one model or one rule, never both. This avoids bloated PRs and repeated implementation when the model is needed by multiple rules. +**One item per PR, with one exception:** Do NOT implement a missing model as part of a `[Rule]` PR. `[Rule]` issues still require both models to exist on `main`. The only exception is a `[Model]` issue that explicitly claims direct ILP solvability: that PR should implement both the model and the direct ` -> ILP` rule together. ### 4. 
Research References @@ -89,7 +89,8 @@ Write implementation plan to `docs/plans/YYYY-MM-DD-.md` using `superpower The plan MUST reference the appropriate implementation skill and follow its steps: -- **For `[Model]` issues:** Follow [add-model](../add-model/SKILL.md) Steps 1-7 as the action pipeline +- **For ordinary `[Model]` issues:** Follow [add-model](../add-model/SKILL.md) Steps 1-7 as the action pipeline +- **For `[Model]` issues that explicitly claim direct ILP solving:** Follow [add-model](../add-model/SKILL.md) Steps 1-7 **and** [add-rule](../add-rule/SKILL.md) Steps 1-6 for the direct ` -> ILP` rule in the same plan / PR - **For `[Rule]` issues:** Follow [add-rule](../add-rule/SKILL.md) Steps 1-6 as the action pipeline Include the concrete details from the issue (problem definition, reduction algorithm, example, etc.) mapped onto each step. @@ -98,9 +99,14 @@ Include the concrete details from the issue (problem definition, reduction algor - Batch 1: Steps 1-5.5 (implement model, register, CLI, tests) - Batch 2: Step 6 (write paper entry — depends on batch 1 for exports) +For a `[Model]` issue with an explicit direct ILP claim, use: +- Batch 1: implement the model, register it, add the direct ` -> ILP` rule, and add model + rule tests +- Batch 2: write both the `problem-def(...)` and `reduction-rule(...)` paper entries, regenerate exports / fixtures, and run final ILP-enabled verification + **Solver rules:** - Ensure at least one solver is provided in the issue template. Check if the solving strategy is valid. If not, reply under issue to ask for clarification. -- If the solver uses integer programming, implement the model and ILP reduction rule together. +- If a `[Model]` issue explicitly claims direct ILP solving, implement the model and the direct ` -> ILP` reduction together in the same PR. Do not leave the ILP rule as a follow-up. 
+- The direct ILP rule must meet the same completeness bar as a standalone production ILP reduction: exact overhead metadata, feature-gated registration, strong closed-loop / extraction / weighted / infeasible / pathological tests when applicable, CLI/example-db/paper integration, and ILP-enabled workspace verification. - Otherwise, ensure the information provided is enough to implement a solver. **Example rules:** @@ -291,6 +297,6 @@ Run /review-pipeline to run agentic review (structural check, quality check, age | Dirty working tree | Use `pipeline_worktree.py prepare-issue-branch` — it stops before branching if the worktree is dirty | | Resuming wrong PR | Always validate `resume_pr.head_ref_name` contains `issue-{N}` before trusting it — GitHub search can return false positives | | `prepare-issue-branch` inside worktree | Skip it when inside a `run-pipeline` worktree (CWD under `.worktrees/`) — the branch already exists | -| Bundling model + rule in one PR | Each PR must contain exactly one model or one rule — STOP and block if model is missing (Step 3.5) | +| Bundling unrelated model + rule in one PR | Keep the normal one-item-per-PR rule. The only exception is a `[Model]` issue that explicitly claims direct ILP solving, which should ship with its direct ` -> ILP` rule | | Plan files left in PR | Delete plan files before final push (Step 7c) | | `make paper` or export steps changed tracked JSON after verification | Run `git status --short`, stage expected generated exports, and STOP if unexpected files remain before push | diff --git a/.claude/skills/propose/SKILL.md b/.claude/skills/propose/SKILL.md index 4293c5c5..6be6ec9e 100644 --- a/.claude/skills/propose/SKILL.md +++ b/.claude/skills/propose/SKILL.md @@ -299,10 +299,28 @@ Only fall back to the full `AskUserQuestion` if the inference is genuinely ambig 5. **Solving strategy** — The library's brute-force solver works on every problem by enumerating the configuration space. 
**Auto-fill "Brute-force" as the baseline** — do not present it as a choice. - If the problem has a natural ILP or QUBO formulation, note it for the companion rules section (Step 3b), not here: - > "Brute-force is the baseline solver. A natural ILP formulation also exists — we'll propose that as a companion reduction rule later." + If the problem also admits a natural **direct** ILP formulation, ask whether the issue should explicitly claim ILP solver support: + ``` + AskUserQuestion: + question: "This problem appears to admit a direct ILP formulation. Should the issue explicitly claim ILP solver support?" + header: "ILP solver" + options: + - label: "Yes — add direct → ILP (Recommended)" + description: "File a direct companion rule issue and mark the model as solvable via ILP" + - label: "No — brute-force only" + description: "Keep the model issue's solver section limited to brute-force unless another concrete solver exists" + ``` + + If the user picks **Yes**: + - Set `requires_ilp_companion = true` + - The companion rule must be a direct ` → ILP` issue filed in the **same** `/propose` session + - State in the draft that later implementation is expected to ship the model and this direct ILP rule in the same PR - **Only use `AskUserQuestion`** if the problem is polynomial-time solvable or has a specialized exact algorithm that should replace brute-force: + If the user picks **No**, keep the model issue's "How to solve" section limited to brute-force unless a different specialized solver is available. + + **Do not mention ILP/QUBO in the model issue's "How to solve" section unless there is a concrete companion rule issue number.** + + **Only use `AskUserQuestion`** beyond that if the problem is polynomial-time solvable or has a specialized exact algorithm that should replace brute-force: ``` AskUserQuestion: question: "This problem appears to be solvable in polynomial time. Which algorithm should be the primary solver?" 
@@ -314,8 +332,6 @@ Only fall back to the full `AskUserQuestion` if the inference is genuinely ambig description: "Use generic brute-force even though faster algorithms exist" ``` - **Do not present ILP/QUBO as solver options.** These are reductions to other problems, handled as companion rules in Step 3b. The "How to solve" section in the issue always says "Brute-force" for NP-hard problems. - 6. **Example** — Generate **at least 3** candidate examples yourself (varying in size and structure), then present via `AskUserQuestion`. **3 options is the minimum — never fewer.** Always include a "Generate new batch" escape hatch: ``` @@ -616,6 +632,7 @@ AskUserQuestion: **Ranking criteria** (in order of priority): - Connections that establish NP-hardness (from a problem reachable from 3-SAT) - **ILP solver path** — if the new model has no outgoing edges and admits a natural ILP formulation, ` → ILP` should be the top companion rule recommendation. This is the fastest way to make the problem solvable via the existing ILP solver infrastructure. +- If `requires_ilp_companion = true`, the direct ` → ILP` rule is mandatory, not optional - Connections to large clusters (QUBO, ILP, SAT families) - Connections that reduce orphan count or bridge disconnected components - Connections the user specifically mentioned during brainstorming @@ -628,6 +645,8 @@ If the user picks one or more rules from Step 3b (or proposes their own): For **each** selected rule, run through the rule brainstorming flow (algorithm, correctness, overhead, example, reference) — but keep it lighter since the model context is already established. +If `requires_ilp_companion = true`, one selected rule **must** be the direct ` → ILP` companion. Do not keep the ILP solver claim in the model draft while deferring that rule to a later issue. 
+ If the user declines ("I'll file rules separately later"): - **Strongly warn** via `AskUserQuestion`: ``` @@ -641,6 +660,7 @@ If the user declines ("I'll file rules separately later"): description: "I understand the risk. I will file companion rule issues before review." ``` - If the user chooses "Let me propose a rule now", go back to Step 3b and let them pick a rule, then brainstorm it. +- If `requires_ilp_companion = true`, the "Skip anyway" option is unavailable unless the model draft is revised to remove the ILP solver claim and revert to brute-force only. - If the user still declines, include a placeholder in the model's "Reduction Rule Crossref" section noting which rules are planned, and add a visible warning in the draft: "⚠ No companion rule filed — this model will be an orphan node until a rule issue is created." --- @@ -667,7 +687,7 @@ If proposing a model + rules, present all drafts together: - Complexity (expression + citation + BibTeX) - Extra Remark (if applicable) - Reduction Rule Crossref (linking to companion rule issues or noting planned rules) -- How to solve (brute-force, ILP, or other — if ILP/QUBO, must cross-reference rule issue) +- How to solve (brute-force, direct ILP via companion rule if explicitly claimed, or other specialized solver — never claim ILP without a direct companion rule issue) - Example Instance - Expected Outcome - Optimization problems: optimal solution + optimal objective value @@ -701,6 +721,7 @@ Apply all 4 checks from `/check-issue` against the draft content: 2. **Non-trivial:** Not isomorphic to existing problem. 3. **Correctness:** Complexity expression verified against literature. 4. **Well-written:** All template sections present, symbols consistent, example exercises core structure, and Expected Outcome matches the problem type (valid solution for satisfaction, optimal solution/value for optimization). +5. 
**ILP claim consistency:** If the draft claims direct ILP solvability, verify the Reduction Rule Crossref includes a direct `[Rule] to ILP` companion issue. Claiming ILP without that concrete rule is a fail. **If any check fails:** Fix the draft automatically if possible. If user input is needed, ask. Loop back to Step 4 with the corrected draft. diff --git a/.github/ISSUE_TEMPLATE/problem.md b/.github/ISSUE_TEMPLATE/problem.md index 7611d171..41239788 100644 --- a/.github/ISSUE_TEMPLATE/problem.md +++ b/.github/ISSUE_TEMPLATE/problem.md @@ -68,8 +68,11 @@ E.g. historical context, notable applications, relationship to other problems, o At least one reduction rule (to or from this problem) must exist or be proposed, so the new problem is connected to the reduction graph. Link to existing rule issues or file new ones. +If you check the ILP solver option below, one linked rule MUST be a direct +`[Rule] to ILP` companion issue. --> +- [ ] #issue-number - [ ] #issue-number ## How to solve @@ -77,7 +80,12 @@ Link to existing rule issues or file new ones. Solver is required for reduction rule verification purpose. --> - [ ] It can be solved by (existing) brute force. -- [ ] It can be solved by reducing to ILP via #issue-number (please file a new rule issue if one does not exist). +- [ ] It can be solved by reducing directly to ILP via #issue-number. + - [ ] Other, refer to ... ## Example Instance @@ -97,7 +105,7 @@ Optimization: provide one optimal configuration and its objective value. Satisfaction: provide one valid / satisfying configuration and a brief justification. This will be stored as ground truth in the example database and cross-validated -by an ILP solver. +by brute force, and by an ILP reduction path when one is explicitly claimed above. 
--> ## BibTeX diff --git a/.gitignore b/.gitignore index 163ecc2c..a3380fb5 100644 --- a/.gitignore +++ b/.gitignore @@ -79,6 +79,7 @@ pkgref/ # Generated example outputs docs/paper/examples/ docs/paper/data/ +docs/plans/ # Claude Code logs claude-output.log diff --git a/docs/plans/2026-03-22-generalized-aggregation-design.md b/docs/plans/2026-03-22-generalized-aggregation-design.md deleted file mode 100644 index f467d20b..00000000 --- a/docs/plans/2026-03-22-generalized-aggregation-design.md +++ /dev/null @@ -1,477 +0,0 @@ -# Generalized Aggregation -- Unified Problem Trait Hierarchy - -**Date:** 2026-03-22 -**Status:** Revised design, approved for implementation planning -**Supersedes:** `2026-03-22-counting-problem-trait-design.md` - -## Problem - -The current trait hierarchy hard-codes two witness-oriented problem families: - -- `OptimizationProblem` (`Metric = SolutionSize`, plus `direction()`) -- `SatisfactionProblem` (`Metric = bool`) - -That works for "find one config" workflows, but it does not scale to `#P` and probability problems where the answer is an aggregate over the whole configuration space. Adding a third parallel leaf trait for counting would unblock the immediate issues, but it would also duplicate the same branching in solvers, macros, registry dispatch, and reduction execution. - -The goal of this design is to unify value aggregation while preserving the existing witness-oriented workflows that the repo already depends on: - -- brute-force witness search -- solution extraction through reduction chains -- `pred reduce` bundles -- `pred solve bundle.json` -- ILP solve-via-reduction - -## Core idea - -Unify the **value layer**, not the **witness layer**. - -Each problem exposes a single aggregate value type. Solvers always know how to compute the final value by folding over all configurations. Some aggregate types also support recovering representative witness configurations; others do not. 
- -This keeps the mathematical core small while making the runtime honest about which operations are valid. - -## `Aggregate` trait - -`Aggregate` remains a monoid at its core, but it also exposes optional witness hooks with safe defaults. That is the minimal extra surface needed to keep dynamic witness APIs working without re-introducing a full parallel trait hierarchy. - -```rust -// src/types.rs -pub trait Aggregate: Clone + fmt::Debug + Serialize + DeserializeOwned { - /// Neutral element for folding over the configuration space. - fn identity() -> Self; - - /// Associative combine operation. - fn combine(self, other: Self) -> Self; - - /// Whether this aggregate admits representative witness configurations. - fn supports_witnesses() -> bool { - false - } - - /// Whether a per-configuration value belongs to the witness set - /// for the final aggregate value. - fn contributes_to_witnesses(_config_value: &Self, _total: &Self) -> bool { - false - } -} -``` - -The default witness behavior is deliberately conservative: - -- `Sum` and `And` remain value-only -- `Max`, `Min`, and `Or` opt in to witness recovery - -## Aggregate types - -Five concrete aggregate wrappers replace the current leaf-trait split: - -```rust -// src/types.rs -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct Max(pub Option); - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct Min(pub Option); - -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct Sum(pub W); - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Or(pub bool); - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct And(pub bool); -``` - -| Type | Identity | Combine | Witness support | Replaces | -|------|----------|---------|-----------------|----------| -| `Max` | `Max(None)` | keep larger `Some` | yes | `SolutionSize` + `Direction::Maximize` | -| `Min` | `Min(None)` | keep smaller `Some` | yes | `SolutionSize` + 
`Direction::Minimize` | -| `Sum` | `Sum(W::zero())` | numeric addition | no | counting / probability totals | -| `Or` | `Or(false)` | logical or | yes | `bool` existential problems | -| `And` | `And(true)` | logical and | no | universal / tautology-style problems | - -Witness semantics: - -- `Max` / `Min`: a config is a witness iff its aggregate value equals the final optimum and is feasible -- `Or`: a config is a witness iff it evaluates to `Or(true)` and the final total is `Or(true)` -- `Sum` / `And`: no single config is a representative witness, so witness APIs return `None` / empty - -## Unified `Problem` trait - -```rust -// src/traits.rs -pub trait Problem: Clone { - const NAME: &'static str; - type Value: Aggregate; - - fn dims(&self) -> Vec; - fn evaluate(&self, config: &[usize]) -> Self::Value; - - fn num_variables(&self) -> usize { - self.dims().len() - } - - fn variant() -> Vec<(&'static str, &'static str)>; - - fn problem_type() -> crate::registry::ProblemType { - crate::registry::find_problem_type(Self::NAME) - .unwrap_or_else(|| panic!("no catalog entry for Problem::NAME = {:?}", Self::NAME)) - } -} -``` - -Removed: - -- `OptimizationProblem` -- `SatisfactionProblem` -- `type Metric` -- `SolutionSize` -- `Direction` - -Unchanged: - -- `DeclaredVariant` -- `Problem::NAME` -- `dims()` -- `variant()` -- catalog bridge via `problem_type()` - -## Solvers - -### Value solving - -All problems support value solving through one fold: - -```rust -// src/solvers/mod.rs -pub trait Solver { - fn solve(&self, problem: &P) -> P::Value; -} -``` - -```rust -// src/solvers/brute_force.rs -impl Solver for BruteForce { - fn solve(&self, problem: &P) -> P::Value { - DimsIterator::new(problem.dims()) - .map(|config| problem.evaluate(&config)) - .fold(P::Value::identity(), P::Value::combine) - } -} -``` - -### Witness solving - -Witness APIs remain available, but only when the aggregate type opts in through the default hooks above: - -```rust -impl BruteForce { - pub fn 
find_witness(&self, problem: &P) -> Option>; - pub fn find_all_witnesses(&self, problem: &P) -> Vec>; - pub fn solve_with_witnesses(&self, problem: &P) - -> (P::Value, Vec>); -} -``` - -Behavior: - -- `Max` / `Min`: witnesses are the optimal configs -- `Or`: witnesses are satisfying configs -- `Sum` / `And`: `find_witness()` returns `None`, `find_all_witnesses()` returns `[]` - -This is the key distinction from the counting-only design: value aggregation is unified, but witness recovery is explicitly optional. - -## Dynamic solve surfaces - -The dynamic registry needs two solve entry points, not one: - -```rust -// src/registry/dyn_problem.rs -pub type SolveValueFn = fn(&dyn Any) -> String; -pub type SolveWitnessFn = fn(&dyn Any) -> Option<(Vec, String)>; -``` - -`VariantEntry` stores both: - -- `solve_value_fn` always exists -- `solve_witness_fn` always exists, but returns `None` for aggregate-only values (`Sum`, `And`) - -`LoadedDynProblem` mirrors that split: - -- `solve_brute_force_value() -> String` -- `solve_brute_force_witness() -> Option<(Vec, String)>` - -This keeps `declare_variants!` simple: - -- the `opt` / `sat` keywords disappear -- the generated value-solve closure always calls `Solver::solve()` -- the generated witness-solve closure always calls `BruteForce::find_witness()` - -No solver-kind branching is needed at variant registration time. - -## CLI behavior - -### `pred solve problem.json` - -Always computes the aggregate value. - -- If a witness exists, print both `Solution` and `Evaluation` -- If no witness exists, print only `Evaluation` - -Examples: - -- `Max(Some(42))` -> solution config + `Maximum: 42` -- `Or(true)` -> solution config + `Satisfiable: true` -- `Sum(0.9832)` -> no single solution config, print `Sum: 0.9832` -- `And(false)` -> no single solution config, print `Tautology: false` - -### `pred solve bundle.json` - -Remains a **witness-only** workflow in this design. 
- -Bundles exist to solve a target problem and map a target configuration back through `extract_solution`. That makes sense only for witness-capable problems and witness-capable reduction paths. - -If the target variant or the path is aggregate-only, bundle solving is rejected early with a clear error. - -### `--solver ilp` - -Also remains **witness-only**. - -ILP support in this repo is a witness-producing solve-via-reduction path. Aggregate-only problems (`Sum`, `And`) do not have an ILP mode unless a future design introduces a threshold or certificate-bearing witness formulation. - -The immediate design change is: - -- keep the ILP solver internals unchanged -- require witness-capable source problems -- require a witness-capable path from source to `ILP` - -## Reductions - -Two reduction traits remain necessary because config mapping and aggregate-value mapping are genuinely different operations. - -```rust -// src/rules/traits.rs -pub trait ReductionResult { - type Source: Problem; - type Target: Problem; - fn target_problem(&self) -> &Self::Target; - fn extract_solution(&self, target_solution: &[usize]) -> Vec; -} - -pub trait ReduceTo: Problem { - type Result: ReductionResult; - fn reduce_to(&self) -> Self::Result; -} - -pub trait AggregateReductionResult { - type Source: Problem; - type Target: Problem; - fn target_problem(&self) -> &Self::Target; - fn extract_value( - &self, - target_value: ::Value, - ) -> ::Value; -} - -pub trait ReduceToAggregate: Problem { - type Result: AggregateReductionResult; - fn reduce_to_aggregate(&self) -> Self::Result; -} -``` - -Type-erased runtime support likewise splits: - -- `DynReductionResult` for witness/config reductions -- `DynAggregateReductionResult` for aggregate/value reductions - -## `EdgeCapabilities` - -The reduction graph needs explicit edge-mode metadata so path search can reject incompatible paths before execution. 
- -```rust -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -pub struct EdgeCapabilities { - pub witness: bool, - pub aggregate: bool, -} -``` - -Capability assignment: - -- `ReduceTo` edges -> `{ witness: true, aggregate: false }` -- `ReduceToAggregate` edges -> `{ witness: false, aggregate: true }` -- natural subtype / `ReductionAutoCast` edges -> `{ witness: true, aggregate: true }` - -Why the natural edges are both: - -- witness mode: the config mapping is identity -- aggregate mode: the value mapping is also identity because the problem semantics do not change - -## Mode-aware pathfinding - -Pathfinding stays on one graph, but it now receives a required capability: - -```rust -pub enum ReductionMode { - Witness, - Aggregate, -} -``` - -`ReductionGraph::find_cheapest_path(...)` becomes capability-aware: - -- witness callers traverse only edges with `capabilities.witness` -- aggregate callers traverse only edges with `capabilities.aggregate` - -This prevents "valid graph path, invalid runtime execution" failures. - -Mode usage: - -- `pred reduce` -> witness -- `pred solve bundle.json` -> witness -- ILP solve-via-reduction -> witness -- future aggregate chain execution -> aggregate -- graph export / inspection -> all edges, with capability metadata shown - -## Aggregate reduction chains - -Witness execution stays on `ReductionChain`. - -Aggregate execution gets its own chain: - -```rust -pub struct AggregateReductionChain { - steps: Vec>, -} -``` - -with: - -- `target_problem_any()` -- backwards composition of `extract_value_dyn(...)` - -The important point is that witness execution and aggregate execution are separate entry points over the same graph, selected by `ReductionMode`. 
- -## Registry and graph changes - -### `ReductionEntry` - -`ReductionEntry` gains: - -- `reduce_fn: Option` -- `reduce_aggregate_fn: Option` -- `capabilities: EdgeCapabilities` - -### `ReductionEdgeData` - -`ReductionEdgeData` gains: - -- `capabilities: EdgeCapabilities` -- optional witness executor -- optional aggregate executor - -### Graph export - -The JSON export includes: - -- `witness: bool` -- `aggregate: bool` - -instead of a single coarse edge-kind label. - -## Model migration examples - -### Optimization - -```rust -impl Problem for MaximumIndependentSet { - type Value = Max; - - fn evaluate(&self, config: &[usize]) -> Max { - if invalid { - Max(None) - } else { - Max(Some(size)) - } - } -} -``` - -### Satisfaction - -```rust -impl Problem for Satisfiability { - type Value = Or; - - fn evaluate(&self, config: &[usize]) -> Or { - Or(satisfies) - } -} -``` - -### Counting - -```rust -impl Problem for NetworkReliability { - type Value = Sum; - - fn evaluate(&self, config: &[usize]) -> Sum { - if terminals_connected { - Sum(probability_weight) - } else { - Sum(0.0) - } - } -} -``` - -## Migration scope - -| Area | Change | -|------|--------| -| `src/types.rs` | replace `SolutionSize` / `Direction` with aggregate wrappers and witness hooks | -| `src/traits.rs` | unify on `Problem` | -| `src/solvers/` | one value fold plus generic witness helpers | -| `src/registry/` | split value solve from witness solve | -| `problemreductions-macros/` | remove `opt` / `sat`, emit both dynamic solve closures | -| `src/rules/` | add aggregate reductions and capability-aware path execution | -| `problemreductions-cli/` | differentiate value-only vs witness-capable solve output | -| existing model/test files | mechanical `Metric -> Value` migration | - -## What is not changed - -- problem names, aliases, and variant resolution -- the overall CLI command set -- the catalog bridge via `ProblemType` -- the fact that ILP is a witness-oriented backend -- the paper format in 
`docs/paper/reductions.typ` - -## Deferred follow-up work - -Out of scope for this design revision: - -- threshold-specific decision wrappers for `Sum` problems -- a new aggregate-only bundle format -- universal counterexample extraction for `And` -- choosing default reduction modes in graph-inspection UX - -## Alternatives considered - -1. **Minimal `CountingProblem` extension** - - Lowest short-term diff - - Repeats the branching in solvers, registry dispatch, macros, and reductions - -2. **Unify value aggregation but keep witness-oriented runtime explicit** (chosen) - - Solves the architectural duplication - - Preserves the witness assumptions already embedded in the repo - -3. **Single edge kind with runtime rejection** - - Smaller patch - - Bad UX and bad API: pathfinding would still return paths that cannot be executed - -## Related issues - -- #737 -- original aggregation architecture issue -- #748 -- default solver per problem (future, orthogonal) -- #235, #237, #404, #405 -- counting models enabled by this refactor -- #256, #257, #394, #395 -- aggregate-value reductions enabled by this refactor From 0e30dd478299a53c0e11de9ec2e701567126f23c Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 24 Mar 2026 20:03:35 +0800 Subject: [PATCH 5/6] fix: resolve ILP review findings --- .../strongconnectivityaugmentation_ilp.rs | 9 ------ src/solvers/ilp/solver.rs | 2 +- .../rules/rootedtreestorageassignment_ilp.rs | 28 +++++-------------- .../rules/stringtostringcorrection_ilp.rs | 7 +++++ .../strongconnectivityaugmentation_ilp.rs | 19 +++++++++++++ src/unit_tests/solvers/ilp/solver.rs | 13 +++++++++ 6 files changed, 47 insertions(+), 31 deletions(-) diff --git a/src/rules/strongconnectivityaugmentation_ilp.rs b/src/rules/strongconnectivityaugmentation_ilp.rs index 5680afad..3e95e7b6 100644 --- a/src/rules/strongconnectivityaugmentation_ilp.rs +++ b/src/rules/strongconnectivityaugmentation_ilp.rs @@ -41,15 +41,6 @@ impl ReduceTo> for StrongConnectivityAugmentation { 
let n = self.num_vertices(); let p = self.num_potential_arcs(); - // Trivial: n ≤ 1 already strongly connected - if n <= 1 { - let target = ILP::new(p, vec![], vec![], ObjectiveSense::Minimize); - return ReductionSCAToILP { - target, - num_candidates: p, - }; - } - let base_arcs = self.graph().arcs(); let m = base_arcs.len(); let root = 0; diff --git a/src/solvers/ilp/solver.rs b/src/solvers/ilp/solver.rs index 0f12d556..4bb6d50d 100644 --- a/src/solvers/ilp/solver.rs +++ b/src/solvers/ilp/solver.rs @@ -88,7 +88,7 @@ impl ILPSolver { pub fn solve(&self, problem: &ILP) -> Option> { let n = problem.num_vars; if n == 0 { - return Some(vec![]); + return problem.is_feasible(&[]).then_some(vec![]); } // Create integer variables with bounds from variable domain diff --git a/src/unit_tests/rules/rootedtreestorageassignment_ilp.rs b/src/unit_tests/rules/rootedtreestorageassignment_ilp.rs index fcd3e7ec..d21d180a 100644 --- a/src/unit_tests/rules/rootedtreestorageassignment_ilp.rs +++ b/src/unit_tests/rules/rootedtreestorageassignment_ilp.rs @@ -48,15 +48,8 @@ fn test_rootedtreestorageassignment_to_ilp_bf_vs_ilp() { #[test] fn test_rootedtreestorageassignment_to_ilp_infeasible() { - // 3 elements, subsets {0,1},{1,2},{0,2} with bound 0: - // All 3 subsets must have extension cost 0 => all pairs are ancestor chains. - // But {0,1},{1,2},{0,2} can't all be chains with cost 0 in a rooted tree - // unless all 3 elements are on one path (chain 0-1-2), which gives cost 0 for all. - // Actually that is feasible: root=0, parent(1)=0, parent(2)=1, depth 0,1,2. - // Let's make it truly infeasible with a strict bound: - // 4 elements, subsets {0,1},{2,3},{0,2},{1,3} bound 0. - // This requires all to be on chains of cost 0 (perfect paths), which is impossible - // for crossing pairs. + // With bound 0, every 2-element subset must appear as a parent-child pair. + // The crossing pairs below force vertex 3 to have two distinct parents. 
let problem = RootedTreeStorageAssignment::new( 4, vec![vec![0, 1], vec![2, 3], vec![0, 2], vec![1, 3]], @@ -69,18 +62,11 @@ fn test_rootedtreestorageassignment_to_ilp_infeasible() { let reduction: ReductionRTSAToILP = ReduceTo::>::reduce_to(&problem); let ilp_solver = ILPSolver::new(); let ilp_result = ilp_solver.solve(reduction.target_problem()); - - match ilp_result { - Some(ilp_solution) => { - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_value = problem.evaluate(&extracted); - assert!(ilp_value.0, "ILP solution should be feasible"); - assert!(bf_witness.is_some(), "BF should also find a solution"); - } - None => { - assert!(bf_witness.is_none(), "both should agree on infeasibility"); - } - } + assert!(bf_witness.is_none(), "source should be infeasible"); + assert!( + ilp_result.is_none(), + "reduced ILP should also be infeasible" + ); } #[test] diff --git a/src/unit_tests/rules/stringtostringcorrection_ilp.rs b/src/unit_tests/rules/stringtostringcorrection_ilp.rs index 823b3871..3b1983b0 100644 --- a/src/unit_tests/rules/stringtostringcorrection_ilp.rs +++ b/src/unit_tests/rules/stringtostringcorrection_ilp.rs @@ -58,6 +58,13 @@ fn test_stringtostringcorrection_to_ilp_infeasible() { let bf = BruteForce::new(); let bf_witness = bf.find_witness(&problem); assert!(bf_witness.is_none(), "source should be infeasible"); + + let reduction: ReductionSTSCToILP = ReduceTo::>::reduce_to(&problem); + let ilp_solver = ILPSolver::new(); + assert!( + ilp_solver.solve(reduction.target_problem()).is_none(), + "reduced ILP should also be infeasible" + ); } #[test] diff --git a/src/unit_tests/rules/strongconnectivityaugmentation_ilp.rs b/src/unit_tests/rules/strongconnectivityaugmentation_ilp.rs index f7f819ff..cb765f5c 100644 --- a/src/unit_tests/rules/strongconnectivityaugmentation_ilp.rs +++ b/src/unit_tests/rules/strongconnectivityaugmentation_ilp.rs @@ -60,6 +60,25 @@ fn test_trivial_single_vertex() { assert!(source.evaluate(&extracted).0); } 
+#[test] +fn test_single_vertex_candidate_selection_must_still_respect_budget() { + let source = + StrongConnectivityAugmentation::new(DirectedGraph::new(1, vec![]), vec![(0, 0, 1)], 0); + let reduction: ReductionSCAToILP = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + let mut config = vec![0; ilp.num_vars()]; + config[0] = 1; + + assert!( + !source.evaluate(&[1]).0, + "source rejects the over-budget candidate" + ); + assert!( + !ilp.evaluate(&config).is_valid(), + "reduced ILP must reject the same candidate selection" + ); +} + #[test] fn test_infeasible_budget() { // 3 vertices 0->1->2, only candidate is (2,0,10), budget=5 diff --git a/src/unit_tests/solvers/ilp/solver.rs b/src/unit_tests/solvers/ilp/solver.rs index 02f527ed..310ab6fa 100644 --- a/src/unit_tests/solvers/ilp/solver.rs +++ b/src/unit_tests/solvers/ilp/solver.rs @@ -89,6 +89,19 @@ fn test_ilp_empty_problem() { assert_eq!(solution, Some(vec![])); } +#[test] +fn test_ilp_empty_problem_with_infeasible_constraint_returns_none() { + let ilp = ILP::::new( + 0, + vec![LinearConstraint::le(vec![], -1.0)], + vec![], + ObjectiveSense::Minimize, + ); + let solver = ILPSolver::new(); + let solution = solver.solve(&ilp); + assert_eq!(solution, None); +} + #[test] fn test_ilp_equality_constraint() { // Minimize x0 subject to x0 + x1 == 1, binary vars From a40ba65398c9d2b37dcc5b5763c5cee34c67238c Mon Sep 17 00:00:00 2001 From: GiggleLiu Date: Tue, 24 Mar 2026 20:33:07 +0800 Subject: [PATCH 6/6] test: fix stale CLI ILP coverage --- Makefile | 4 ++-- problemreductions-cli/tests/cli_tests.rs | 19 ++++++++++++++----- problemreductions-macros/src/lib.rs | 2 +- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 3348dd91..fa9689c3 100644 --- a/Makefile +++ b/Makefile @@ -58,9 +58,9 @@ help: build: cargo build --features ilp-highs -# Run all tests (including ignored tests) +# Run all workspace tests (including ignored tests) test: - cargo test --features 
"ilp-highs example-db" -- --include-ignored + cargo test --features "ilp-highs example-db" --workspace -- --include-ignored # Run MCP server tests mcp-test: ## Run MCP server tests diff --git a/problemreductions-cli/tests/cli_tests.rs b/problemreductions-cli/tests/cli_tests.rs index daba3991..a8ead494 100644 --- a/problemreductions-cli/tests/cli_tests.rs +++ b/problemreductions-cli/tests/cli_tests.rs @@ -163,7 +163,7 @@ fn test_create_stacker_crane_schema_help_uses_documented_flags() { } #[test] -fn test_solve_balanced_complete_bipartite_subgraph_suggests_bruteforce() { +fn test_solve_balanced_complete_bipartite_subgraph_default_solver_uses_ilp() { let tmp = std::env::temp_dir().join("pred_test_bcbs_problem.json"); let create = pred() .args([ @@ -181,11 +181,20 @@ fn test_solve_balanced_complete_bipartite_subgraph_suggests_bruteforce() { .args(["solve", tmp.to_str().unwrap()]) .output() .unwrap(); - assert!(!solve.status.success()); - let stderr = String::from_utf8(solve.stderr).unwrap(); assert!( - stderr.contains("--solver brute-force"), - "expected brute-force hint, got: {stderr}" + solve.status.success(), + "stderr: {}", + String::from_utf8_lossy(&solve.stderr) + ); + let stdout = String::from_utf8(solve.stdout).unwrap(); + let json: serde_json::Value = serde_json::from_str(&stdout).unwrap(); + assert_eq!(json["problem"], "BalancedCompleteBipartiteSubgraph"); + assert_eq!(json["solver"], "ilp"); + assert_eq!(json["reduced_to"], "ILP"); + assert_eq!(json["evaluation"], "Or(true)"); + assert!( + json["solution"].as_array().is_some_and(|solution| !solution.is_empty()), + "expected a non-empty solution array, got: {stdout}" ); std::fs::remove_file(tmp).ok(); diff --git a/problemreductions-macros/src/lib.rs b/problemreductions-macros/src/lib.rs index 0cbdf482..cd9d66b8 100644 --- a/problemreductions-macros/src/lib.rs +++ b/problemreductions-macros/src/lib.rs @@ -430,7 +430,7 @@ impl syn::parse::Parse for DeclareVariantsInput { /// /// # Example /// -/// ```ignore 
+/// ```text /// declare_variants! { /// MaximumIndependentSet => "1.1996^num_vertices", /// MaximumIndependentSet => "2^sqrt(num_vertices)",