diff --git a/Project.toml b/Project.toml index b888055b..54ebbaff 100644 --- a/Project.toml +++ b/Project.toml @@ -17,9 +17,22 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" [compat] BlockDiagonals = "0.1" ChainRulesCore = "1" +HiGHS = "1" +Ipopt = "1" IterativeSolvers = "0.9" -JuMP = "0.23, 1" +JuMP = "1" LazyArrays = "0.21, 0.22, 1" MathOptInterface = "1" MathOptSetDistances = "0.2" +SCS = "1" julia = "1.6" + +[extras] +DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" +HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b" +Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" +SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[targets] +test = ["DelimitedFiles", "HiGHS", "Ipopt", "SCS", "Test"] diff --git a/test/Project.toml b/test/Project.toml deleted file mode 100644 index 8f9b8cf8..00000000 --- a/test/Project.toml +++ /dev/null @@ -1,19 +0,0 @@ -[deps] -ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -JuMP = "4076af6c-e467-56ae-b986-b466b2749572" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" -Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" -HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b" -Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" -MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" -Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13" -Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[compat] -Ipopt = "1.0.2" diff --git a/test/bridges.jl b/test/bridges.jl deleted file mode 100644 index df3f5e23..00000000 --- a/test/bridges.jl +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) 2020: Akshay Sharma and contributors -# -# Use of this source code is governed by an MIT-style license that can be found -# in the LICENSE.md file or at https://opensource.org/licenses/MIT. 
- -module TestBridges - -using Test - -function runtests() - for name in names(@__MODULE__; all = true) - if startswith("$name", "test_") - @testset "$(name)" begin - getfield(@__MODULE__, name)() - end - end - end -end - -import MathOptInterface as MOI -import DiffOpt - -function test_quad_to_soc() - model = DiffOpt.ConicProgram.Model() - F = MOI.VectorAffineFunction{Float64} - S = MOI.RotatedSecondOrderCone - # Adds the set type to the list - @test MOI.supports_constraint(model, F, S) - bridged = MOI.Bridges.Constraint.QuadtoSOC{Float64}(model) - x = MOI.add_variables(bridged, 3) - # We pick orthogonal rows but this is not the `U` that cholesky is going to take in the bridge - U = [ - 1 -1 1 - 1 2 1 - 1 0 -1.0 - ] - a = [1, 2, -3.0] - b = -5.0 - Q = U' * U - f = x' * Q * x / 2.0 + a' * x - c = MOI.add_constraint(bridged, f, MOI.LessThan(-b)) - dQ = [ - 1 -1 0 - -1 2 1 - 0 1 3.0 - ] - da = [-1, 1.0, -2] - db = 3.0 - df = x' * dQ * x / 2.0 + da' * x + db - MOI.Utilities.final_touch(bridged, nothing) - MOI.set(bridged, DiffOpt.ForwardConstraintFunction(), c, df) - return -end - -function _test_dU_from_dQ(U, dU) - dQ = dU'U + U'dU - _dU = copy(dQ) - __dU = copy(dQ) - # Compiling - DiffOpt.dU_from_dQ!(__dU, U) - @test @allocated(DiffOpt.dU_from_dQ!(_dU, U)) == 0 - @test _dU ≈ dU -end - -function test_dU_from_dQ() - _test_dU_from_dQ(2ones(1, 1), 3ones(1, 1)) - U = [ - 1 2 - 0 1 - ] - dU = [ - 1 -1 - 0 1 - ] - _test_dU_from_dQ(U, dU) - U = [ - -3 5 - 0 2.5 - ] - dU = [ - 2 3.5 - 0 -2 - ] - _test_dU_from_dQ(U, dU) - U = [ - 1.5 2 -1 - 0 -1 3.5 - 0 0 -2 - ] - dU = [ - 2.5 -1 2 - 0 5 -3 - 0 0 3 - ] - return _test_dU_from_dQ(U, dU) -end - -end - -TestBridges.runtests() diff --git a/test/conic_program.jl b/test/conic_program.jl new file mode 100644 index 00000000..a3d4a939 --- /dev/null +++ b/test/conic_program.jl @@ -0,0 +1,929 @@ +# Copyright (c) 2020: Akshay Sharma and contributors +# +# Use of this source code is governed by an MIT-style license that can be found +# in the LICENSE.md file or at https://opensource.org/licenses/MIT. + +module TestConicProgram + +using Test +import DiffOpt +import Ipopt +import LinearAlgebra +import MathOptInterface as MOI +import SCS + +const ATOL = 2e-4 +const RTOL = 2e-4 + +function runtests() + for name in names(@__MODULE__; all = true) + if startswith("$name", "test_") + @testset "$(name)" begin + getfield(@__MODULE__, name)() + end + end + end + return +end + +function _test_simple_socp(eq_vec::Bool) + # referred from _soc2test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L1355 + # find equivalent diffcp python program here: https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_socp_1_py.ipynb + # Problem SOC2 + # min x + # s.t. y ≥ 1/√2 + # x² + y² ≤ 1 + # in conic form: + # min x + # s.t. 
-1/√2 + y ∈ R₊ + # 1 - t ∈ {0} + # (t,x,y) ∈ SOC₃ + model = DiffOpt.diff_optimizer(SCS.Optimizer) + MOI.set(model, MOI.Silent(), true) + x, y, t = MOI.add_variables(model, 3) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), + 1.0x, + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) + if eq_vec + ceq = MOI.add_constraint( + model, + MOI.Utilities.vectorize([-1.0t + 1.0]), + MOI.Zeros(1), + ) + else + ceq = MOI.add_constraint(model, -1.0t, MOI.EqualTo(-1.0)) + end + cnon = MOI.add_constraint( + model, + MOI.Utilities.vectorize([1.0y - 1 / √2]), + MOI.Nonnegatives(1), + ) + csoc = MOI.add_constraint( + model, + MOI.Utilities.vectorize([1.0t, 1.0x, 1.0y]), + MOI.SecondOrderCone(3), + ) + MOI.optimize!(model) + if eq_vec + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + ceq, + MOI.Utilities.vectorize([1.0 * x]), + ) + else + MOI.set(model, DiffOpt.ForwardConstraintFunction(), ceq, 1.0 * x) + end + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + cnon, + MOI.Utilities.vectorize([1.0 * y]), + ) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + csoc, + MOI.Utilities.operate(vcat, Float64, 1.0 * t, 0.0, 0.0), + ) + DiffOpt.forward_differentiate!(model) + # these matrices are benchmarked with the output generated by diffcp + # refer to the python file mentioned above to get equivalent python source code + @test model.diff.model.x ≈ [-1 / √2; 1 / √2; 1.0] atol = ATOL rtol = RTOL + if eq_vec + @test model.diff.model.s ≈ [0.0, 0.0, 1.0, -1 / √2, 1 / √2] atol = ATOL rtol = + RTOL + @test model.diff.model.y ≈ [√2, 1.0, √2, 1.0, -1.0] atol = ATOL rtol = + RTOL + else + @test model.diff.model.s ≈ [0.0, 1.0, -1 / √2, 1 / √2, 0.0] atol = ATOL rtol = + RTOL + @test model.diff.model.y ≈ [1.0, √2, 1.0, -1.0, √2] atol = ATOL rtol = + RTOL + end + dx = [1.12132144; 1 / √2; 1 / √2] + for (i, vi) in enumerate([x, y, t]) + @test dx[i] ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = + ATOL rtol = RTOL + end + # @test dx ≈ [1.12132144; 1/√2; 1/√2] atol=ATOL rtol=RTOL + # @test ds ≈ [0.0; 0.0; -2.92893438e-01; 1.12132144e+00; 7.07106999e-01] atol=ATOL rtol=RTOL + # @test dy ≈ [2.4142175; 5.00000557; 3.8284315; √2; -4.00000495] atol=ATOL rtol=RTOL + return +end + +test_differentiating_simple_SOCP_vector() = _test_simple_socp(true) + +test_differentiating_simple_SOCP_scalar() = _test_simple_socp(false) + +# referred from _psd0test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L3919 +# find equivalent diffcp program here: https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_1_py.ipynb + +# min X[1,1] + X[2,2] max y +# X[2,1] = 1 [0 y/2 [ 1 0 +# y/2 0 <= 0 1] +# X >= 0 y free +# Optimal solution: +# +# ⎛ 1 1 ⎞ +# X = ⎜ ⎟ y = 2 +# ⎝ 1 1 ⎠ +function test_simple_psd() + model = DiffOpt.diff_optimizer(SCS.Optimizer) + MOI.set(model, MOI.Silent(), true) + X = MOI.add_variables(model, 3) + vov = MOI.VectorOfVariables(X) + cX = MOI.add_constraint( + model, + MOI.VectorAffineFunction{Float64}(vov), + MOI.PositiveSemidefiniteConeTriangle(2), + ) + c = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + [MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(1.0, X[2]))], + [-1.0], + ), + MOI.Zeros(1), + ) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), + 1.0 * X[1] + 1.0 * X[end], + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.optimize!(model) + x = MOI.get(model, MOI.VariablePrimal(), X) + cone_types = unique([ + S for (F, S) in + MOI.get(model.optimizer, 
MOI.ListOfConstraintTypesPresent()) + ]) + conic_form = DiffOpt.ConicProgram.Form{Float64}() + cones = conic_form.constraints.sets + DiffOpt.set_set_types(cones, cone_types) + index_map = MOI.copy_to(conic_form, model) + # s = DiffOpt.map_rows((ci, r) -> MOI.get(model.optimizer, MOI.ConstraintPrimal(), ci), model.optimizer, cones, index_map, DiffOpt.Flattened{Float64}()) + # y = DiffOpt.map_rows((ci, r) -> MOI.get(model.optimizer, MOI.ConstraintDual(), ci), model.optimizer, cones, index_map, DiffOpt.Flattened{Float64}()) + @test x ≈ ones(3) atol = ATOL rtol = RTOL + # @test s ≈ [0.0; ones(3)] atol=ATOL rtol=RTOL + # @test y ≈ [2.0, 1.0, -1.0, 1.0] atol=ATOL rtol=RTOL + # test1: changing the constant in `c`, i.e. changing value of X[2] + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c, + MOI.VectorAffineFunction{Float64}( + MOI.ScalarAffineTerm{Float64}[], + [1.0], + ), + ) + DiffOpt.forward_differentiate!(model) + dx = -ones(3) + for (i, vi) in enumerate(X) + @test dx[i] ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = + ATOL rtol = RTOL + end + # @test dx ≈ -ones(3) atol=ATOL rtol=RTOL # will change the value of other 2 variables + # @test ds[2:4] ≈ -ones(3) atol=ATOL rtol=RTOL # will affect PSD constraint too + # test2: changing X[1], X[3] but keeping the objective (their sum) same + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c, + MOI.Utilities.zero_with_output_dimension( + MOI.VectorAffineFunction{Float64}, + 1, + ), + ) + MOI.set(model, DiffOpt.ForwardObjectiveFunction(), -1.0X[1] + 1.0X[3]) + DiffOpt.forward_differentiate!(model) + # @test dx ≈ [1.0, 0.0, -1.0] atol=ATOL rtol=RTOL # note: no effect on X[2] + dx = [1.0, 0.0, -1.0] + for (i, vi) in enumerate(X) + @test dx[i] ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = + ATOL rtol = RTOL + end + return +end + +function test_differentiating_conic_with_PSD_and_SOC_constraints() + # similar to _psd1test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L4054 + # find equivalent diffcp example here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_2_py.ipynb + + # | 2 1 0 | + # min | 1 2 1 | . X + x1 + # | 0 1 2 | + # + # + # s.t. | 1 0 0 | + # | 0 1 0 | . X + x1 = 1 + # | 0 0 1 | + # + # | 1 1 1 | + # | 1 1 1 | . X + x2 + x3 = 1/2 + # | 1 1 1 | + # + # (x1,x2,x3) in C^3_q + # X in C_psd + # + # The dual is + # max y1 + y2/2 + # + # s.t. 
| y1+y2 y2 y2 | + # | y2 y1+y2 y2 | in C_psd + # | y2 y2 y1+y2 | + # + # (1-y1, -y2, -y2) in C^3_q + model = DiffOpt.diff_optimizer(SCS.Optimizer) + MOI.set(model, MOI.Silent(), true) + δ = √(1 + (3 * √2 + 2) * √(-116 * √2 + 166) / 14) / 2 + ε = √((1 - 2 * (√2 - 1) * δ^2) / (2 - √2)) + y2 = 1 - ε * δ + y1 = 1 - √2 * y2 + obj = y1 + y2 / 2 + k = -2 * δ / ε + x2 = ((3 - 2obj) * (2 + k^2) - 4) / (4 * (2 + k^2) - 4 * √2) + α = √(3 - 2obj - 4x2) / 2 + β = k * α + X = MOI.add_variables(model, 6) + x = MOI.add_variables(model, 3) + vov = MOI.VectorOfVariables(X) + cX = MOI.add_constraint( + model, + MOI.VectorAffineFunction{Float64}(vov), + MOI.PositiveSemidefiniteConeTriangle(3), + ) + cx = MOI.add_constraint( + model, + MOI.VectorAffineFunction{Float64}(MOI.VectorOfVariables(x)), + MOI.SecondOrderCone(3), + ) + c1 = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + MOI.VectorAffineTerm.( + 1:1, + MOI.ScalarAffineTerm.( + [1.0, 1.0, 1.0, 1.0], + [X[1], X[3], X[end], x[1]], + ), + ), + [-1.0], + ), + MOI.Zeros(1), + ) + c2 = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + MOI.VectorAffineTerm.( + 1:1, + MOI.ScalarAffineTerm.( + [1.0, 2, 1, 2, 2, 1, 1, 1], + [X; x[2]; x[3]], + ), + ), + [-0.5], + ), + MOI.Zeros(1), + ) + # this is a useless constraint - refer to the tests below + # even if we comment this, it won't affect the optimal values + c_extra = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + MOI.VectorAffineTerm.(1:1, MOI.ScalarAffineTerm.(ones(3), x)), + [100.0], + ), + MOI.Nonnegatives(1), + ) + objXidx = [1:3; 5:6] + objXcoefs = 2 * ones(5) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), + MOI.ScalarAffineFunction( + MOI.ScalarAffineTerm.([objXcoefs; 1.0], [X[objXidx]; x[1]]), + 0.0, + ), + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.optimize!(model) + _x = MOI.get(model, MOI.VariablePrimal(), x) + _X = MOI.get(model, MOI.VariablePrimal(), X) + cone_types = unique([ + S for (F, S) in + MOI.get(model.optimizer, MOI.ListOfConstraintTypesPresent()) + ]) + conic_form = DiffOpt.ConicProgram.Form{Float64}() + cones = conic_form.constraints.sets + DiffOpt.set_set_types(cones, cone_types) + index_map = MOI.copy_to(conic_form, model) + # s = DiffOpt.map_rows((ci, r) -> MOI.get(model.optimizer, MOI.ConstraintPrimal(), ci), model.optimizer, cones, index_map, DiffOpt.Flattened{Float64}()) + # y = DiffOpt.map_rows((ci, r) -> MOI.get(model.optimizer, MOI.ConstraintDual(), ci), model.optimizer, cones, index_map, DiffOpt.Flattened{Float64}()) + @test _X ≈ [ + 0.21725121 + -0.25996907 + 0.31108582 + 0.21725009 + -0.25996907 + 0.21725121 + ] atol = ATOL rtol = RTOL + @test _x ≈ [0.2544097; 0.17989425; 0.17989425] atol = ATOL rtol = RTOL + # @test x ≈ [ 0.21725121; -0.25996907; 0.31108582; 0.21725009; -0.25996907; 0.21725121; + # 0.2544097; 0.17989425; 0.17989425] atol=ATOL rtol=RTOL + # @test s ≈ [ + # 0.0, 0.0, 100.614, + # 0.254408, 0.179894, 0.179894, + # 0.217251, -0.25997, 0.31109, 0.217251, -0.25997, 0.217251, + # ] atol=ATOL rtol=RTOL + # TODO: it should be 100, not 100.614, it's surely a residual error + # Joaquim: it is not an error, it is sum(_x) + # there seems to be an issue with this test (note the low precision) + # + # @test y ≈ [ + # 0.544758, 0.321905, 0.0, + # 0.455242, -0.321905, -0.321905, + # 1.13334, 0.678095, 1.13334, -0.321905, 0.678095, 1.13334, + # ] atol=ATOL rtol=RTOL + # test c_extra + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c_extra, + MOI.VectorAffineFunction{Float64}( 
MOI.ScalarAffineTerm{Float64}[], + [1.0], + ), + ) + DiffOpt.forward_differentiate!(model) + # a small change in the constant in c_extra should not affect any other variable or constraint other than c_extra itself + for (i, vi) in enumerate(X) + @test 0.0 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = + 1e-2 rtol = RTOL + end + for (i, vi) in enumerate(x) + @test 0.0 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = + 1e-2 rtol = RTOL + end + # @test dx ≈ zeros(9) atol=1e-2 + # @test dy ≈ zeros(12) atol=0.012 + # @test [ds[1:2]; ds[4:end]] ≈ zeros(11) atol=1e-2 + # @test ds[3] ≈ 1.0 atol=1e-2 # except c_extra itself + return +end + +function test_differentiating_conic_with_PSD_and_POS_constraints() + # refer psdt2test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L4306 + # find equivalent diffcp program here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_3_py.ipynb + model = DiffOpt.diff_optimizer(SCS.Optimizer) + MOI.set(model, MOI.Silent(), true) + x = MOI.add_variables(model, 7) + @test MOI.get(model, MOI.NumberOfVariables()) == 7 + η = 10.0 + c1 = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + MOI.VectorAffineTerm.(1, MOI.ScalarAffineTerm.(-1.0, x[1:6])), + [η], + ), + MOI.Nonnegatives(1), + ) + c2 = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + MOI.VectorAffineTerm.(1:6, MOI.ScalarAffineTerm.(1.0, x[1:6])), + zeros(6), + ), + MOI.Nonnegatives(6), + ) + α = 0.8 + δ = 0.9 + c3 = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + MOI.VectorAffineTerm.( + [fill(1, 7); fill(2, 5); fill(3, 6)], + MOI.ScalarAffineTerm.( + [ + δ / 2, + α, + δ, + δ / 4, + δ / 8, + 0.0, + -1.0, + -δ / (2 * √2), + -δ / 4, + 0, + -δ / (8 * √2), + 0.0, + δ / 2, + δ - α, + 0, + δ / 8, + δ / 4, + -1.0, + ], + [x[1:7]; x[1:3]; x[5:6]; x[1:3]; x[5:7]], + ), + ), + zeros(3), + ), + MOI.PositiveSemidefiniteConeTriangle(2), + ) + c4 = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + MOI.VectorAffineTerm.( + 1, + MOI.ScalarAffineTerm.(0.0, [x[1:3]; x[5:6]]), + ), + [0.0], + ), + MOI.Zeros(1), + ) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), + MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1.0, x[7])], 0.0), + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE) + MOI.optimize!(model) + # dc = ones(7) + MOI.set( + model, + DiffOpt.ForwardObjectiveFunction(), + MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(ones(7), x), 0.0), + ) + # db = ones(11) + # dA = ones(11, 7) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c1, + MOI.Utilities.vectorize(ones(1, 7) * x + ones(1)), + ) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c2, + MOI.Utilities.vectorize(ones(6, 7) * x + ones(6)), + ) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c3, + MOI.Utilities.vectorize(ones(3, 7) * x + ones(3)), + ) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c4, + MOI.Utilities.vectorize(ones(1, 7) * x + ones(1)), + ) + DiffOpt.forward_differentiate!(model) + @test model.diff.model.x ≈ + [20 / 3.0, 0.0, 10 / 3.0, 0.0, 0.0, 0.0, 1.90192379] atol = ATOL rtol = + RTOL + @test model.diff.model.s ≈ [ + 0.0, + 0.0, + 20 / 3.0, + 0.0, + 10 / 3.0, + 0.0, + 0.0, + 0.0, + 4.09807621, + -2.12132, + 1.09807621, + ] atol = ATOL rtol = RTOL + @test model.diff.model.y ≈ [ + 0.0, + 0.19019238, + 0.0, + 0.12597667, + 0.0, + 0.14264428, + 0.14264428, + 0.01274047, + 0.21132487, + 0.408248, + 0.78867513, + ] atol = ATOL rtol = RTOL + atol = 0.3 + rtol 
= 0.01 + # compare these with https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_3_py.ipynb + # results do not match exactly: 1. there is some residual error 2. diffcp results are SCS-specific, hence scaled + dx = [-39.6066, 10.8953, -14.9189, 10.9054, 10.883, 10.9118, -21.7508] + for (i, vi) in enumerate(x) + @test dx[i] ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = + atol rtol = rtol + end + # @test dy ≈ [0.0, -3.56905, 0.0, -0.380035, 0.0, -0.41398, -0.385321, -0.00743119, -0.644986, -0.550542, -2.36765] atol=atol rtol=rtol + # @test ds ≈ [0.0, 0.0, -50.4973, 0.0, -25.8066, 0.0, 0.0, 0.0, -7.96528, -1.62968, -2.18925] atol=atol rtol=rtol + # TODO: future example, how to differentiate wrt a specific constraint/variable, refer to the QPLib article for more + dA = zeros(11, 7) + dA[3:8, 1:6] = Matrix{Float64}(LinearAlgebra.I, 6, 6) # differentiating only wrt POS constraint c2 + db = zeros(11) + dc = zeros(7) + # db = zeros(11) + # dA = zeros(11, 7) + # dA[3:8, 1:6] = Matrix{Float64}(LinearAlgebra.I, 6, 6) # differentiating only wrt POS constraint c2 + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c1, + MOI.Utilities.zero_with_output_dimension( + MOI.VectorAffineFunction{Float64}, + 1, + ), + ) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c2, + MOI.Utilities.vectorize(ones(6) .* x[1:6]), + ) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c3, + MOI.Utilities.zero_with_output_dimension( + MOI.VectorAffineFunction{Float64}, + 3, + ), + ) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c4, + MOI.Utilities.zero_with_output_dimension( + MOI.VectorAffineFunction{Float64}, + 1, + ), + ) + DiffOpt.forward_differentiate!(model) + # for (i, vi) in enumerate(X) + # @test 0.0 ≈ MOI.get(model, + # DiffOpt.ForwardVariablePrimal(), vi) atol=1e-2 rtol=RTOL + # end + # TODO add a test here, probably on duals + # # note that there's no change in the PSD slack values or dual optima + # @test dy ≈ [0.0, 0.0, 0.0, 0.125978, 0.0, 0.142644, 0.142641, 0.0127401, 0.0, 0.0, 0.0] atol=atol rtol=RTOL + # @test ds ≈ [0.0, 0.0, -6.66672, 0.0, -3.33336, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] atol=atol rtol=RTOL + return +end + +function test_differentiating_a_simple_psd() + # refer to _psd3test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L4484 + # find equivalent diffcp program here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_0_py.ipynb + + # min x + # s.t. [x 1 1] + # [1 x 1] ⪰ 0 + # [1 1 x] + model = DiffOpt.diff_optimizer(SCS.Optimizer) + MOI.set(model, MOI.Silent(), true) + x = MOI.add_variable(model) + func = MOI.Utilities.operate(vcat, Float64, x, 1.0, x, 1.0, 1.0, x) + # do not confuse this constraint with the matrix `c` in the conic form (of the matrices A, b, c) + c = MOI.add_constraint(model, func, MOI.PositiveSemidefiniteConeTriangle(3)) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), + MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, [x]), 0.0), + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.optimize!(model) + # SCS/Mosek specific + # @test s' ≈ [1. 1.41421356 1.41421356 1. 1.41421356 1. 
] atol=ATOL rtol=RTOL + # @test y' ≈ [ 0.33333333 -0.23570226 -0.23570226 0.33333333 -0.23570226 0.33333333] atol=ATOL rtol=RTOL + # dA = zeros(6, 1) + # db = ones(6) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c, + MOI.VectorAffineFunction{Float64}( + MOI.ScalarAffineTerm{Float64}[], + ones(6), + ), + ) + # dc = zeros(1) + DiffOpt.forward_differentiate!(model) + @test model.diff.model.x ≈ [1.0] atol = 10ATOL rtol = 10RTOL + @test model.diff.model.s ≈ ones(6) atol = ATOL rtol = RTOL + @test model.diff.model.y ≈ [1 / 3, -1 / 6, 1 / 3, -1 / 6, -1 / 6, 1 / 3] atol = + ATOL rtol = RTOL + @test -0.5 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = + RTOL + # @test dx ≈ [-0.5] atol=ATOL rtol=RTOL + # @test dy ≈ zeros(6) atol=ATOL rtol=RTOL + # @test ds ≈ [0.5, 1.0, 0.5, 1.0, 1.0, 0.5] atol=ATOL rtol=RTOL + # test 2 + dA = zeros(6, 1) + db = zeros(6) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c, + MOI.Utilities.zero_with_output_dimension( + MOI.VectorAffineFunction{Float64}, + 6, + ), + ) + MOI.set(model, DiffOpt.ForwardObjectiveFunction(), 1.0 * x) + DiffOpt.forward_differentiate!(model) + @test 0.0 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = + RTOL + # @test dx ≈ zeros(1) atol=ATOL rtol=RTOL + # @test dy ≈ [0.333333, -0.333333, 0.333333, -0.333333, -0.333333, 0.333333] atol=ATOL rtol=RTOL + # @test ds ≈ zeros(6) atol=ATOL rtol=RTOL + return +end + +function test_verifying_cache_after_differentiating_a_qp() + Q = [4.0 1.0; 1.0 2.0] + q = [1.0, 1.0] + G = [1.0 1.0] + h = [-1.0] + model = DiffOpt.diff_optimizer(SCS.Optimizer) + MOI.set(model, MOI.Silent(), true) + x = MOI.add_variables(model, 2) + # define objective + quad_terms = MOI.ScalarQuadraticTerm{Float64}[] + for i in 1:2 + for j in i:2 # indexes (i,j), (j,i) will be mirrored. 
specify only one kind + push!(quad_terms, MOI.ScalarQuadraticTerm(Q[i, j], x[i], x[j])) + end + end + objective_function = MOI.ScalarQuadraticFunction( + quad_terms, + MOI.ScalarAffineTerm.(q, x), + 0.0, + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), + objective_function, + ) + # add constraint + c = MOI.add_constraint( + model, + MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(G[1, :], x), 0.0), + MOI.LessThan(h[1]), + ) + MOI.optimize!(model) + x_sol = MOI.get(model, MOI.VariablePrimal(), x) + @test x_sol ≈ [-0.25, -0.75] atol = ATOL rtol = RTOL + MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) + @test model.diff === nothing + DiffOpt.reverse_differentiate!(model) + grad_wrt_h = + MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) + @test grad_wrt_h ≈ -1.0 atol = 2ATOL rtol = RTOL + # adding two variables invalidates the cache + y = MOI.add_variables(model, 2) + MOI.delete(model, y) + @test model.diff === nothing + MOI.optimize!(model) + MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) + DiffOpt.reverse_differentiate!(model) + grad_wrt_h = + MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) + @test grad_wrt_h ≈ -1.0 atol = 2ATOL rtol = RTOL + # adding single variable invalidates the cache + y0 = MOI.add_variable(model) + @test model.diff === nothing + MOI.add_constraint( + model, + MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1.0, y0)], 0.0), + MOI.EqualTo(42.0), + ) + MOI.optimize!(model) + @test model.diff === nothing + MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) + DiffOpt.reverse_differentiate!(model) + grad_wrt_h = + MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) + @test grad_wrt_h ≈ -1.0 atol = 5e-3 rtol = RTOL + @test model.diff.model.gradient_cache isa DiffOpt.QuadraticProgram.Cache + # adding constraint invalidates the cache + c2 = MOI.add_constraint( + model, + MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, 1.0], x), 0.0), + MOI.LessThan(0.0), + ) + @test model.diff === nothing + MOI.optimize!(model) + MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) + DiffOpt.reverse_differentiate!(model) + grad_wrt_h = + MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) + @test grad_wrt_h ≈ -1.0 atol = 5e-3 rtol = RTOL + # second constraint inactive + grad_wrt_h = + MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c2)) + @test grad_wrt_h ≈ 0.0 atol = 5e-3 rtol = RTOL + @test model.diff.model.gradient_cache isa DiffOpt.QuadraticProgram.Cache + return +end + +function test_verifying_cache_on_a_psd() + model = DiffOpt.diff_optimizer(SCS.Optimizer) + MOI.set(model, MOI.Silent(), true) + x = MOI.add_variable(model) + func = MOI.Utilities.operate(vcat, Float64, x, 1.0, x, 1.0, 1.0, x) + c = MOI.add_constraint(model, func, MOI.PositiveSemidefiniteConeTriangle(3)) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), + MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, [x]), 0.0), + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) + @test model.diff === nothing + MOI.optimize!(model) + @test model.diff === nothing + dA = zeros(6, 1) + db = ones(6) + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c, + MOI.VectorAffineFunction{Float64}( + MOI.ScalarAffineTerm{Float64}[], + ones(6), + ), + ) + dc = zeros(1) + DiffOpt.forward_differentiate!(model) + @test model.diff.model.x ≈ [1.0] atol = 10ATOL rtol = 10RTOL + 
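# Note: `model.diff.model.x`, `.s`, and `.y` below appear to hold the primal, + # slack, and dual vectors cached by the differentiable conic model, so these + # checks pin down the cached solution before the gradient cache itself is + # exercised. + 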
@test model.diff.model.s ≈ ones(6) atol = ATOL rtol = RTOL + @test model.diff.model.y ≈ [1 / 3, -1 / 6, 1 / 3, -1 / 6, -1 / 6, 1 / 3] atol = + ATOL rtol = RTOL + @test -0.5 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = + RTOL + @test model.diff.model.gradient_cache isa DiffOpt.ConicProgram.Cache + DiffOpt.forward_differentiate!(model) + @test -0.5 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = + RTOL + # test 2 + MOI.set( + model, + DiffOpt.ForwardConstraintFunction(), + c, + MOI.VectorAffineFunction{Float64}( + MOI.ScalarAffineTerm{Float64}[], + zeros(6), + ), + ) + MOI.set(model, DiffOpt.ForwardObjectiveFunction(), 1.0 * x) + DiffOpt.forward_differentiate!(model) + @test 0.0 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = + RTOL + return +end + +# min X[1,1] + X[2,2] max y +# X[2,1] = 1 [0 y/2 [ 1 0 +# y/2 0 <= 0 1] +# X >= 0 y free +# Optimal solution: +# +# ⎛ 1 1 ⎞ +# X = ⎜ ⎟ y = 2 +# ⎝ 1 1 ⎠ +function test_differentiating_simple_PSD_back() + model = DiffOpt.diff_optimizer(SCS.Optimizer) + MOI.set(model, MOI.Silent(), true) + X = MOI.add_variables(model, 3) + vov = MOI.VectorOfVariables(X) + cX = MOI.add_constraint( + model, + MOI.VectorAffineFunction{Float64}(vov), + MOI.PositiveSemidefiniteConeTriangle(2), + ) + c = MOI.add_constraint( + model, + MOI.VectorAffineFunction( + [MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(1.0, X[2]))], + [-1.0], + ), + MOI.Zeros(1), + ) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), + MOI.ScalarAffineFunction( + MOI.ScalarAffineTerm.(1.0, [X[1], X[end]]), + 0.0, + ), + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.optimize!(model) + x = MOI.get(model, MOI.VariablePrimal(), X) + cone_types = unique([ + S for (F, S) in + MOI.get(model.optimizer, MOI.ListOfConstraintTypesPresent()) + ]) + conic_form = DiffOpt.ConicProgram.Form{Float64}() + cones = conic_form.constraints.sets + DiffOpt.set_set_types(cones, cone_types) + index_map = MOI.copy_to(conic_form, model) + @test x ≈ ones(3) atol = ATOL rtol = RTOL + MOI.set(model, DiffOpt.ReverseVariablePrimal(), X[1], 1.0) + DiffOpt.reverse_differentiate!(model) + db = MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) + @test db ≈ [-1.0] atol = ATOL rtol = RTOL + return +end + +function test_singular_exception() + Q = [4.0 1.0; 1.0 2.0] + q = [1.0, 1.0] + G = [1.0 1.0] + h = [-1.0] + model = DiffOpt.diff_optimizer(Ipopt.Optimizer) + MOI.set(model, MOI.Silent(), true) + x = MOI.add_variables(model, 2) + quad_terms = MOI.ScalarQuadraticTerm{Float64}[] + for i in 1:2 + for j in i:2 + push!(quad_terms, MOI.ScalarQuadraticTerm(Q[i, j], x[i], x[j])) + end + end + objective_function = MOI.ScalarQuadraticFunction( + quad_terms, + MOI.ScalarAffineTerm.(q, x), + 0.0, + ) + MOI.set( + model, + MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), + objective_function, + ) + MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) + c = MOI.add_constraint( + model, + MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(G[1, :], x), 0.0), + MOI.LessThan(h[1]), + ) + MOI.optimize!(model) + @test MOI.get(model, MOI.VariablePrimal(), x) ≈ [-0.25; -0.75] atol = ATOL rtol = + RTOL + @test model.diff === nothing + MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) + DiffOpt.reverse_differentiate!(model) + grad_wrt_h = + MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) + @test grad_wrt_h ≈ -1.0 atol = 2ATOL rtol = RTOL + @test model.diff !== nothing + # adding two 
variables invalidates the cache + y = MOI.add_variables(model, 2) + for yi in y + MOI.delete(model, yi) + end + @test model.diff === nothing + MOI.optimize!(model) + MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) + DiffOpt.reverse_differentiate!(model) + grad_wrt_h = + MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) + @test grad_wrt_h ≈ -1.0 atol = 1e-3 + @test model.diff !== nothing + return +end + +function test_quad_to_soc() + model = DiffOpt.ConicProgram.Model() + F = MOI.VectorAffineFunction{Float64} + S = MOI.RotatedSecondOrderCone + # Adds the set type to the list + @test MOI.supports_constraint(model, F, S) + bridged = MOI.Bridges.Constraint.QuadtoSOC{Float64}(model) + x = MOI.add_variables(bridged, 3) + # We pick orthogonal rows but this is not the `U` that cholesky is going to + # take in the bridge. + U = [1 -1 1; 1 2 1; 1 0 -1.0] + a = [1, 2, -3.0] + b = -5.0 + Q = U' * U + f = x' * Q * x / 2.0 + a' * x + c = MOI.add_constraint(bridged, f, MOI.LessThan(-b)) + dQ = [1 -1 0; -1 2 1; 0 1 3.0] + da = [-1, 1.0, -2] + db = 3.0 + df = x' * dQ * x / 2.0 + da' * x + db + MOI.Utilities.final_touch(bridged, nothing) + MOI.set(bridged, DiffOpt.ForwardConstraintFunction(), c, df) + return +end + +end # module + +TestConicProgram.runtests() diff --git a/test/conic_reverse.jl b/test/conic_reverse.jl deleted file mode 100644 index c2e44507..00000000 --- a/test/conic_reverse.jl +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) 2020: Akshay Sharma and contributors -# -# Use of this source code is governed by an MIT-style license that can be found -# in the LICENSE.md file or at https://opensource.org/licenses/MIT. - -import SCS -# min X[1,1] + X[2,2] max y -# X[2,1] = 1 [0 y/2 [ 1 0 -# y/2 0 <= 0 1] -# X >= 0 y free -# Optimal solution: -# -# ⎛ 1 1 ⎞ -# X = ⎜ ⎟ y = 2 -# ⎝ 1 1 ⎠ -@testset "Differentiating simple PSD back" begin - model = DiffOpt.diff_optimizer(SCS.Optimizer) - MOI.set(model, MOI.Silent(), true) - X = MOI.add_variables(model, 3) - vov = MOI.VectorOfVariables(X) - cX = MOI.add_constraint( - model, - MOI.VectorAffineFunction{Float64}(vov), - MOI.PositiveSemidefiniteConeTriangle(2), - ) - - c = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - [MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(1.0, X[2]))], - [-1.0], - ), - MOI.Zeros(1), - ) - - MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), - MOI.ScalarAffineFunction( - MOI.ScalarAffineTerm.(1.0, [X[1], X[end]]), - 0.0, - ), - ) - MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - - MOI.optimize!(model) - - x = MOI.get(model, MOI.VariablePrimal(), X) - - cone_types = unique([ - S for (F, S) in - MOI.get(model.optimizer, MOI.ListOfConstraintTypesPresent()) - ]) - conic_form = DiffOpt.ConicProgram.Form{Float64}() - cones = conic_form.constraints.sets - DiffOpt.set_set_types(cones, cone_types) - index_map = MOI.copy_to(conic_form, model) - - @test x ≈ ones(3) atol = ATOL rtol = RTOL - - MOI.set(model, DiffOpt.ReverseVariablePrimal(), X[1], 1.0) - - DiffOpt.reverse_differentiate!(model) - - db = MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) - - @test db ≈ [-1.0] atol = ATOL rtol = RTOL -end diff --git a/test/jump.jl b/test/jump.jl index 0c35bb15..074ae156 100644 --- a/test/jump.jl +++ b/test/jump.jl @@ -3,23 +3,37 @@ # Use of this source code is governed by an MIT-style license that can be found # in the LICENSE.md file or at https://opensource.org/licenses/MIT. 
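Each rewritten test file uses the same harness: the file wraps its tests in a module, every test is a zero-argument function whose name starts with `test_`, and `runtests()` discovers those functions by reflection and wraps each in a `@testset`. A minimal self-contained sketch of the pattern (the module and test names here are illustrative, not part of the diff):

module TestExample

using Test

# Discovered automatically because its name starts with `test_`.
test_arithmetic() = @test 1 + 1 == 2

function runtests()
    for name in names(@__MODULE__; all = true)
        if startswith("$name", "test_")
            @testset "$name" begin
                getfield(@__MODULE__, name)()
            end
        end
    end
    return
end

end # module

TestExample.runtests()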
+module TestJuMP + using Test using JuMP + +import DelimitedFiles import DiffOpt -import MathOptInterface as MOI -import LinearAlgebra: dot, ⋅, I -import Ipopt -import Ipopt import HiGHS +import Ipopt +import IterativeSolvers +import LinearAlgebra +import MathOptInterface as MOI import SCS -import DelimitedFiles -@testset "Testing forward on trivial QP" begin +const ATOL = 2e-4 +const RTOL = 2e-4 + +function runtests() + for name in names(@__MODULE__; all = true) + if startswith("$name", "test_") + @testset "$(name)" begin + getfield(@__MODULE__, name)() + end + end + end + return +end + +function test_forward_on_trivial_qp() # using example on https://osqp.org/docs/examples/setup-and-solve.html - Q = [ - 4.0 1.0 - 1.0 2.0 - ] + Q = [4.0 1.0; 1.0 2.0] q = [1.0, 1.0] G = [ 1.0 1.0 @@ -30,54 +44,35 @@ import DelimitedFiles 0.0 -1.0 ] h = [1, 0.7, 0.7, -1, 0, 0] - model = JuMP.Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer)) MOI.set(model, MOI.Silent(), true) @variable(model, x[1:2]) - @objective( - model, - Min, - LinearAlgebra.dot(Q * x, x) + LinearAlgebra.dot(q, x) - ) + @objective(model, Min, x' * Q * x + q' * x) @constraint(model, G * x .<= h,) optimize!(model) - @test JuMP.value.(x) ≈ [0.3, 0.7] atol = ATOL rtol = RTOL + return end -@testset "Differentiating trivial QP 1" begin - Q = [ - 4.0 1.0 - 1.0 2.0 - ] +function test_differentiating_trivial_qp_1() + Q = [4.0 1.0; 1.0 2.0] q = [1.0, 1.0] G = [1.0 1.0] h = [-1.0] - model = JuMP.direct_model(DiffOpt.diff_optimizer(Ipopt.Optimizer)) MOI.set(model, MOI.Silent(), true) x = @variable(model, [1:2]) - @objective( - model, - Min, - LinearAlgebra.dot(Q * x, x) + LinearAlgebra.dot(q, x) - ) + @objective(model, Min, x' * Q * x + q' * x) @constraint(model, ctr_le, G * x .<= h,) optimize!(model) - @test JuMP.value.(x) ≈ [-0.25, -0.75] atol = ATOL rtol = RTOL - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, 1.0) - DiffOpt.reverse_differentiate!(model) - DiffOpt.reverse_differentiate!(model) - grad_constraint = JuMP.constant( MOI.get(model, DiffOpt.ReverseConstraintFunction(), ctr_le[1]), ) @test grad_constraint ≈ -1.0 atol = ATOL rtol = RTOL - # Test some overloads from https://github.com/jump-dev/DiffOpt.jl/issues/211 grad_obj = MOI.get(model, DiffOpt.ReverseObjectiveFunction()) @test JuMP.coefficient(grad_obj, x[1], x[2]) ≈ @@ -87,12 +82,12 @@ end 2 * JuMP.coefficient(grad_obj, x[1], x[1]) atol = ATOL rtol = RTOL # TODO: this simple show fails @show ctr_le + return end -@testset "Differentiating QP with inequality and equality constraints" begin +function test_differentiating_qp_with_inequality_and_equality_constraints() # refered from: https://www.mathworks.com/help/optim/ug/quadprog.html#d120e113424 # Find equivalent qpth program here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/DiffOpt_tests_4_py.ipynb - Q = [ 1.0 -1.0 1.0 -1.0 2.0 -2.0 @@ -110,26 +105,20 @@ end h = [1.0, 1.0, 1.0, 0.0, 0.0, 0.0] A = [1.0 1.0 1.0;] b = [0.5] - model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer)) MOI.set(model, MOI.Silent(), true) @variable(model, x[1:3]) - @objective(model, Min, dot(Q * x, x) + dot(q, x)) + @objective(model, Min, x' * Q * x + q' * x) @constraint(model, ctr_le, G * x .<= h,) @constraint(model, ctr_eq, A * x .== b,) optimize!(model) - @test JuMP.value.(x) ≈ [0.0, 0.5, 0.0] atol = ATOL rtol = RTOL - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, 1.0) - DiffOpt.reverse_differentiate!(model) - @test MOI.Utilities.isapprox_zero( moi_function(MOI.get(model, DiffOpt.ReverseObjectiveFunction())), ATOL, ) - db = [1.0] 
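+ # Reading of the assertions below: the affine function returned by + # `ReverseConstraintFunction` appears to carry the constraint-matrix + # sensitivity (dA, dG) in its coefficients and the negated right-hand-side + # sensitivity (-db, -dh) in its constant, hence the sign conventions used in + # the loops that follow.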
dA = [0.0 -0.5 0.0] for (j, jc) in enumerate(ctr_eq) @@ -139,7 +128,6 @@ end @test JuMP.coefficient(grad, iv) ≈ dA[j, i] atol = ATOL rtol = RTOL end end - dh = zeros(6, 1) dG = zeros(6, 3) for (j, jc) in enumerate(ctr_le) @@ -149,18 +137,18 @@ end @test JuMP.coefficient(grad, iv) ≈ dG[j, i] atol = ATOL rtol = RTOL end end + return end # refered from https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contquadratic.jl#L3 # Find equivalent CVXPYLayers and QPTH code here: # https://github.com/AKS1996/jump-gsoc-2020/blob/master/DiffOpt_tests_1_py.ipynb -@testset "Differentiating MOI examples 1" begin +function test_differentiating_MOI_examples_1() # homogeneous quadratic objective # Min x^2 + xy + y^2 + yz + z^2 # st x + 2y + 3z >= 4 (c1) # x + y >= 1 (c2) # x, y, z \in R - model = JuMP.direct_model(DiffOpt.diff_optimizer(Ipopt.Optimizer)) MOI.set(model, MOI.Silent(), true) @variables(model, begin @@ -171,17 +159,11 @@ end @constraint(model, c1, x + 2y + 3z >= 4) @constraint(model, c2, x + y >= 1) @objective(model, Min, x^2 + x * y + y^2 + y * z + z^2) - optimize!(model) - z = [x, y, z] - @test JuMP.value.(z) ≈ [4 / 7, 3 / 7, 6 / 7] atol = ATOL rtol = RTOL - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), z, 1.0) - DiffOpt.reverse_differentiate!(model) - dl_dq = [-0.2142857; 0.21428567; -0.07142857] dl_dQ = [ -0.12244895 0.01530609 -0.11224488 @@ -191,7 +173,6 @@ end expected = dl_dq' * z + z' * (dl_dQ / 2.0) * z @test moi_function(MOI.get(model, DiffOpt.ReverseObjectiveFunction())) ≈ moi_function(expected) atol = ATOL rtol = RTOL - dh = [0.35714284; 0.4285714] dG = -[ 0.05102035 0.30612245 0.255102 @@ -204,31 +185,25 @@ end @test JuMP.coefficient(grad, iv) ≈ dG[j, i] atol = ATOL rtol = RTOL end end + return end -@testset "Differentiating MOI examples 2 - non trivial backward pass vector" begin +function test_differentiating_moi_examples_2_non_trivial_backward_pass_vector() # non-homogeneous quadratic objective # minimize 2 x^2 + y^2 + xy + x + y # s.t. 
x, y >= 0 # x + y = 1 (c1) - model = JuMP.Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer)) MOI.set(model, MOI.Silent(), true) @variable(model, x ≥ 0) @variable(model, y ≥ 0) @constraint(model, c1, x + y == 1) @objective(model, Min, 2 * x^2 + y^2 + x * y + x + y) - optimize!(model) - z = [x, y] - @test JuMP.value.(z) ≈ [0.25, 0.75] atol = ATOL rtol = RTOL - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), z, [1.3, 0.5]) - DiffOpt.reverse_differentiate!(model) - dl_dq = [-0.2; 0.2] dl_dQ = [ -0.05 -0.05 @@ -237,17 +212,13 @@ end expected = dl_dq' * z + z' * (dl_dQ / 2.0) * z @test moi_function(MOI.get(model, DiffOpt.ReverseObjectiveFunction())) ≈ moi_function(expected) atol = ATOL rtol = RTOL - func = MOI.get(model, DiffOpt.ReverseConstraintFunction(), c1) @test -0.7 ≈ JuMP.constant(func) atol = ATOL rtol = RTOL - dl_dA = [0.375 -1.075] for (j, vi) in enumerate(z) @test dl_dA[j] ≈ JuMP.coefficient(func, vi) atol = ATOL rtol = RTOL end - c_le = [LowerBoundRef(x), LowerBoundRef(y)] - dl_dh = [1e-8; 1e-8] dl_dG = [1e-8 1e-8; 1e-8 1e-8] for (j, jc) in enumerate(c_le) @@ -258,120 +229,82 @@ end RTOL end end + return end -@testset "Differentiating non trivial convex QP JuMP" begin +function test_differentiating_non_trivial_convex_qp_jump() nz = 10 nineq_le = 25 neq = 10 - # read matrices from files names = ["P", "q", "G", "h", "A", "b"] matrices = [] - for name in names - push!( - matrices, - DelimitedFiles.readdlm( - joinpath( - dirname(dirname(pathof(DiffOpt))), - "test", - "data", - name * ".txt", - ), - ' ', - Float64, - '\n', - ), - ) + filename = joinpath(@__DIR__, "data", "$name.txt") + push!(matrices, DelimitedFiles.readdlm(filename, ' ', Float64, '\n')) end - Q, q, G, h, A, b = matrices q = vec(q) h = vec(h) b = vec(b) - model = JuMP.Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer)) MOI.set(model, MOI.Silent(), true) - @variable(model, x[1:nz]) - - @objective(model, Min, dot(Q * x, x) + dot(q, x)) + @objective(model, Min, x' * Q * x + q' * x) @constraint(model, c_le, G * x .<= h) @constraint(model, c_eq, A * x .== b) - optimize!(model) - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, 1.0) - # compute gradients DiffOpt.reverse_differentiate!(model) - # read gradients from files param_names = ["dP", "dq", "dG", "dh", "dA", "db"] grads_actual = [] - for name in param_names + filename = joinpath(@__DIR__, "data", "$(name).txt") push!( grads_actual, - DelimitedFiles.readdlm( - joinpath(@__DIR__, "data", "$(name).txt"), - ' ', - Float64, - '\n', - ), + DelimitedFiles.readdlm(filename, ' ', Float64, '\n'), ) end - - dq = grads_actual[2] + dq = vec(grads_actual[2]) dh = grads_actual[4] db = grads_actual[6] - grad = MOI.get(model, DiffOpt.ReverseObjectiveFunction()) - @test moi_function(grad) ≈ moi_function(dq ⋅ x) atol = 1e-2 rtol = 1e-2 - + @test moi_function(grad) ≈ moi_function(dq' * x) atol = 1e-2 rtol = 1e-2 for (i, ci) in enumerate(c_le) @test -dh[i] ≈ JuMP.constant( MOI.get(model, DiffOpt.ReverseConstraintFunction(), ci), ) atol = 1e-2 rtol = 1e-2 end - for (i, ci) in enumerate(c_eq) @test -db[i] ≈ JuMP.constant( MOI.get(model, DiffOpt.ReverseConstraintFunction(), ci), ) atol = 1e-2 rtol = 1e-2 end - # # testing differences # for i in 1:size(grads)[1] # @test grads[i] ≈ grads_actual[i] atol=1e-2 rtol=1e-2 # end + return end -@testset "Differentiating LP; checking gradients for non-active contraints" begin +function test_differentiating_lp_checking_gradients_for_non_active_contraints() # Issue #40 from Gurobi.jl # min x # s.t. 
x >= 0 # x >= 3 - model = direct_model(DiffOpt.diff_optimizer(HiGHS.Optimizer)) MOI.set(model, MOI.Silent(), true) - @variable(model, x[1:1]) - @objective(model, Min, 1.1 * x[1]) @constraint(model, c1, x[1] ≥ 0) @constraint(model, c2, x[1] ≥ 3) - optimize!(model) - # obtain gradients MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, 1.0) - DiffOpt.reverse_differentiate!(model) - c_le = [c1, c2] - dh = [0.0, 1.0] dG = [0.0, -3.0] for (j, jc) in enumerate(c_le) @@ -381,24 +314,16 @@ end @test JuMP.coefficient(grad, iv) ≈ dG[j, i] atol = ATOL rtol = RTOL end end - model = direct_model(DiffOpt.diff_optimizer(HiGHS.Optimizer)) MOI.set(model, MOI.Silent(), true) - @variable(model, x[1:1]) - @objective(model, Min, x[1]) @constraint(model, c1, x[1] ≥ 0) @constraint(model, c2, x[1] ≥ 3) - optimize!(model) - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, 1.0) - DiffOpt.reverse_differentiate!(model) - c_le = [c1, c2] - dh = [0.0, 1.0] dG = [0.0, -3.0] for (j, jc) in enumerate(c_le) @@ -408,30 +333,25 @@ end @test JuMP.coefficient(grad, iv) ≈ dG[j, i] atol = ATOL rtol = RTOL end end + return end -@testset "Differentiating LP; checking gradients for non-active contraints" begin +function test_differentiating_lp_checking_gradients_for_non_active_contraints2() # refered from - https://en.wikipedia.org/wiki/Simplex_algorithm#Example - # max 2x + 3y + 4z # s.t. 3x+2y+z <= 10 # 2x+5y+3z <= 15 # x,y,z >= 0 - model = direct_model(DiffOpt.diff_optimizer(SCS.Optimizer)) MOI.set(model, MOI.Silent(), true) @variable(model, v[1:3] ≥ 0) - @objective(model, Min, dot([-2.0, -3.0, -4.0], v)) + @objective(model, Min, LinearAlgebra.dot([-2.0, -3.0, -4.0], v)) (x, y, z) = v - @constraint(model, c1, 3x + 2y + z <= 10) @constraint(model, c2, 2x + 5y + 3z <= 15) optimize!(model) - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), v, 1.0) - DiffOpt.reverse_differentiate!(model) - dQ = zeros(3, 3) dc = zeros(3) dG = [ @@ -442,15 +362,12 @@ end 0.0 0.0 0.0 ] dh = [0.0; 1 / 3; 1 / 3; -2 / 3; 0.0] - cb = LowerBoundRef.(v) cc = [c1, c2] ctrs = vcat(cc, cb)#, cc) - expected = dc' * v + v' * dQ * v grad = MOI.get(model, DiffOpt.ReverseObjectiveFunction()) @test moi_function(grad) ≈ moi_function(expected) atol = ATOL rtol = RTOL - for (j, jc) in enumerate(ctrs) grad = MOI.get(model, DiffOpt.ReverseConstraintFunction(), jc) @test JuMP.constant(grad) ≈ -dh[j] atol = ATOL rtol = RTOL @@ -458,9 +375,10 @@ end @test JuMP.coefficient(grad, iv) ≈ dG[j, i] atol = ATOL rtol = RTOL end end + return end -@testset "Differentiating LP with variable bounds" begin +function test_differentiating_lp_with_variable_bounds() # max 2x + 3y + 4z # s.t. 
3x+2y+z <= 10 # 2x+5y+3z <= 15 # x ≤ 3 @@ -469,7 +387,6 @@ end # 0 ≤ y ≤ 2 # z ≥ 2 # x,y,z >= 0 # variant of previous test with same solution - model = direct_model(DiffOpt.diff_optimizer(HiGHS.Optimizer)) MOI.set(model, MOI.Silent(), true) @variable(model, v[1:3] ≥ 0) @@ -480,13 +397,9 @@ end @constraint(model, c3, x ≤ 3) @constraint(model, c4, 1.0 * y ≤ 2) @constraint(model, c5, z ≥ 2) - optimize!(model) - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), v, 1.0) - DiffOpt.reverse_differentiate!(model) - dQ = zeros(3, 3) dc = zeros(3) dG = [ @@ -500,15 +413,12 @@ end 0.0 0.0 0.0 ] dh = [0.0, 1 / 3, 0.0, 0.0, 0.0, 1 / 3, -2 / 3, 0.0] - cb = LowerBoundRef.(v) cc = [c1, c2, c3, c4, c5] ctrs = vcat(cc, cb) - expected = dc' * v + v' * dQ * v grad = MOI.get(model, DiffOpt.ReverseObjectiveFunction()) @test moi_function(grad) ≈ moi_function(expected) atol = ATOL rtol = RTOL - for (j, jc) in enumerate(ctrs) grad = MOI.get(model, DiffOpt.ReverseConstraintFunction(), jc) @test JuMP.constant(grad) ≈ -dh[j] atol = ATOL rtol = RTOL @@ -516,9 +426,10 @@ end @test JuMP.coefficient(grad, iv) ≈ dG[j, i] atol = ATOL rtol = RTOL end end + return end -@testset "Differentiating simple SOCP" begin +function test_differentiating_simple_socp() # referred from _soc2test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L1355 # find equivalent diffcp python program here: https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_socp_1_py.ipynb @@ -531,42 +442,203 @@ end # s.t. -1/√2 + y ∈ R₊ # 1 - t ∈ {0} # (t,x,y) ∈ SOC₃ - model = JuMP.direct_model(DiffOpt.diff_optimizer(SCS.Optimizer)) MOI.set(model, MOI.Silent(), true) @variable(model, x) @variable(model, y) @variable(model, t) - vv = [x, y, t] - @objective(model, Min, x) @constraint(model, c1, [-1 / √2 + y] in MOI.Nonnegatives(1)) @constraint(model, c2, [1 - t] in MOI.Zeros(1)) @constraint(model, c3, [1.0 * t, x, y] in SecondOrderCone()) - optimize!(model) - v = JuMP.value.(vv) # slack variables s = collect(Iterators.flatten(JuMP.value.([c1, c2, c3]))) y = collect(Iterators.flatten(JuMP.dual.([c1, c2, c3]))) - # these matrices are benchmarked with the output generated by diffcp # refer the python file mentioned above to get equivalent python source code @test v ≈ [-1 / √2; 1 / √2; 1.0] atol = ATOL rtol = RTOL @test s ≈ [0.0, 0.0, 1.0, -1 / √2, 1 / √2] atol = ATOL rtol = RTOL @test y ≈ [1, √2, √2, 1, -1] atol = ATOL rtol = RTOL - dA = zeros(5, 3) - dA[1:3, :] .= Matrix(1.0I, 3, 3) + dA[1:3, :] .= LinearAlgebra.I(3) db = zeros(5) dc = zeros(3) - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), vv, 1.0) - @test_broken DiffOpt.reverse_differentiate!(model) - # TODO add tests + return +end +function test_sensitivity_index_issue() + testModel = Model(() -> DiffOpt.diff_optimizer(HiGHS.Optimizer)) + #Variables + @variable(testModel, v[i in 1:1]) + @variable(testModel, u[i in 1:1]) + @variable(testModel, g[j in 1:3]) + @variable(testModel, a) + #Constraints + @constraint(testModel, v_cons_min, v[1] >= 0.0) + @constraint(testModel, u_cons_min, u[1] >= 0.0) + @constraint(testModel, G1_cons_min, g[1] >= 0.0) + @constraint(testModel, G2_cons_min, g[2] >= 0.0) + @constraint(testModel, G3_cons_min, g[3] >= 0.0) + @constraint(testModel, a_cons_min, a >= 0.0) + @constraint(testModel, v_cons_max, v[1] <= 50.0) + @constraint(testModel, G1_cons_max, g[1] <= 15.0) + @constraint(testModel, G2_cons_max, g[2] <= 20.0) + @constraint(testModel, G3_cons_max, g[3] <= 25.0) + @constraint(testModel, future_constraint, v[1] + a >= 50) + @constraint(testModel, 
hidro_conservation[i in 1:1], v[1] + u[i] == 45.0) + @constraint(testModel, demand_constraint, g[1] + g[2] + g[3] + u[1] == 100) + #Stage objective + @objective(testModel, Min, 3.0 * g[1] + 5.0 * g[2] + 7.0 * g[3] + a) + #Calculation of sensitivities by Manual KKT + #v,u,g1,g2,g3,a + A = [ + 1.0 1.0 0.0 0.0 0.0 0.0 + 0.0 1.0 1.0 1.0 1.0 0.0 + ] + #rhs + b = [ + 45.0 + 100.0 + ] + #v,u,g1,g2,g3,a + G = [ + -1.0 0.0 0.0 0.0 0.0 0.0 + 0.0 -1.0 0.0 0.0 0.0 0.0 + 0.0 0.0 -1.0 0.0 0.0 0.0 + 0.0 0.0 0.0 -1.0 0.0 0.0 + 0.0 0.0 0.0 0.0 -1.0 0.0 + 0.0 0.0 0.0 0.0 0.0 -1.0 + 1.0 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 1.0 0.0 0.0 0.0 + 0.0 0.0 0.0 1.0 0.0 0.0 + 0.0 0.0 0.0 0.0 1.0 0.0 + -1.0 0.0 0.0 0.0 0.0 -1.0 + ] + h = [#rhs + 0.0 + 0.0 + 0.0 + 0.0 + 0.0 + 0.0 + 50.0 + 15.0 + 20.0 + 25.0 + -50.0 + ] + optimize!(testModel) + @test JuMP.value.(testModel[:g]) == [15.0, 20.0, 20.0] + @test JuMP.value.(testModel[:v]) == [0.0] + @test JuMP.value.(testModel[:u]) == [45.0] + @test JuMP.value(testModel[:a]) == 50.0 + lambda = [ + JuMP.dual.(testModel[:v_cons_min]) + JuMP.dual.(testModel[:u_cons_min]) + JuMP.dual.(testModel[:G1_cons_min]) + JuMP.dual.(testModel[:G2_cons_min]) + JuMP.dual.(testModel[:G3_cons_min]) + JuMP.dual.(testModel[:a_cons_min]) + JuMP.dual.(testModel[:v_cons_max]) + JuMP.dual.(testModel[:G1_cons_max]) + JuMP.dual.(testModel[:G2_cons_max]) + JuMP.dual.(testModel[:G3_cons_max]) + JuMP.dual.(testModel[:future_constraint]) + ] + lambda = abs.(lambda) + z = [ + JuMP.value.(testModel[:v])[1] + JuMP.value.(testModel[:u])[1] + JuMP.value.(testModel[:g])[1] + JuMP.value.(testModel[:g])[2] + JuMP.value.(testModel[:g])[3] + JuMP.value.(testModel[:a]) + ] + Q = zeros(6, 6) + D_lambda = LinearAlgebra.diagm(lambda) + D_Gz_h = LinearAlgebra.diagm(G * z - h) + KKT = [ + Q (G') (A') + D_lambda*G LinearAlgebra.diagm(G * z - h) zeros(11, 2) + A zeros(2, 11) zeros(2, 2) + ] + rhsKKT = [ + zeros(6, 11) zeros(6, 2) + D_lambda zeros(11, 2) + zeros(2, 11) LinearAlgebra.diagm(ones(2)) + ] + derivativeKKT = reduce( + hcat, + IterativeSolvers.lsqr(KKT, rhsKKT[:, i]) for i in 1:size(rhsKKT, 2) + ) + dprimal_dconsKKT = derivativeKKT[1:6, :] + #Finished calculation of sensitivities by Manual KKT + #Calculation of sensitivities by DiffOpt + xRef = [ + testModel[:v_cons_min] + testModel[:u_cons_min] + testModel[:G1_cons_min] + testModel[:G2_cons_min] + testModel[:G3_cons_min] + testModel[:a_cons_min] + testModel[:v_cons_max] + testModel[:G1_cons_max] + testModel[:G2_cons_max] + testModel[:G3_cons_max] + testModel[:future_constraint] + testModel[:hidro_conservation] + testModel[:demand_constraint] + ] + yRef = [ + testModel[:v] + testModel[:u] + testModel[:g] + testModel[:a] + ] + dprimal_dcons = Array{Float64,2}(undef, length(yRef), length(xRef)) + for i in 1:length(xRef) + constraint_equation = convert(MOI.ScalarAffineFunction{Float64}, 1.0) + MOI.set( + testModel, + DiffOpt.ForwardConstraintFunction(), + xRef[i], + constraint_equation, + ) + DiffOpt.forward_differentiate!(testModel) + dprimal_dcons[:, i] .= + MOI.get.(testModel, DiffOpt.ForwardVariablePrimal(), yRef) + constraint_equation = convert(MOI.ScalarAffineFunction{Float64}, 0.0) + MOI.set( + testModel, + DiffOpt.ForwardConstraintFunction(), + xRef[i], + constraint_equation, + ) + end + #The result given by the manual KKT solve needs a sign flip in some entries to match the constraints. + #The result given by DiffOpt needs a sign flip to be on the right side of the equation. 
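+ # That is, each column i below compares the DiffOpt directional derivative of + # the primal solution with respect to constraint i's right-hand side against + # the manual KKT solve, with the per-column signs reflecting each + # constraint's ≤/≥ orientation.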
+ @test -dprimal_dcons[:, 1] ≈ -dprimal_dconsKKT[:, 1] atol = ATOL + @test -dprimal_dcons[:, 2] ≈ -dprimal_dconsKKT[:, 2] atol = ATOL + @test -dprimal_dcons[:, 3] ≈ -dprimal_dconsKKT[:, 3] atol = ATOL + @test -dprimal_dcons[:, 4] ≈ -dprimal_dconsKKT[:, 4] atol = ATOL + @test -dprimal_dcons[:, 5] ≈ -dprimal_dconsKKT[:, 5] atol = ATOL + @test -dprimal_dcons[:, 6] ≈ -dprimal_dconsKKT[:, 6] atol = ATOL + @test -dprimal_dcons[:, 7] ≈ dprimal_dconsKKT[:, 7] atol = ATOL + @test -dprimal_dcons[:, 8] ≈ dprimal_dconsKKT[:, 8] atol = ATOL + @test -dprimal_dcons[:, 9] ≈ dprimal_dconsKKT[:, 9] atol = ATOL + @test -dprimal_dcons[:, 10] ≈ dprimal_dconsKKT[:, 10] atol = ATOL + @test -dprimal_dcons[:, 11] ≈ -dprimal_dconsKKT[:, 11] atol = ATOL + @test -dprimal_dcons[:, 12] ≈ dprimal_dconsKKT[:, 12] atol = ATOL + @test -dprimal_dcons[:, 13] ≈ dprimal_dconsKKT[:, 13] atol = ATOL + return end + +end # module + +TestJuMP.runtests() diff --git a/test/moi_wrapper.jl b/test/moi_wrapper.jl index dd813cb0..e2d547c4 100644 --- a/test/moi_wrapper.jl +++ b/test/moi_wrapper.jl @@ -3,1391 +3,125 @@ # Use of this source code is governed by an MIT-style license that can be found # in the LICENSE.md file or at https://opensource.org/licenses/MIT. +module TestMOIWrapper + using Test import DiffOpt -import MathOptInterface as MOI -import DelimitedFiles -import Ipopt import HiGHS -import SCS - -const VAF = MOI.VectorAffineFunction{Float64} -_vaf(c::Vector{Float64}) = VAF(MOI.ScalarAffineTerm{Float64}[], c) - -@testset "MOI Unit" begin - function test_runtests() - model = DiffOpt.diff_optimizer(HiGHS.Optimizer) - # `Variable.ZerosBridge` makes dual needed by some tests fail. - MOI.Bridges.remove_bridge( - model.optimizer.optimizer, - MOI.Bridges.Variable.ZerosBridge{Float64}, - ) - MOI.set(model, MOI.Silent(), true) - config = MOI.Test.Config(; atol = 1e-7) - MOI.Test.runtests(model, config), return - end - test_runtests() -end - -@testset "Testing forward on trivial QP" begin - # using example on https://osqp.org/docs/examples/setup-and-solve.html - Q = [ - 4.0 1.0 - 1.0 2.0 - ] - q = [1.0, 1.0] - G = [ - 1.0 1.0 - 1.0 0.0 - 0.0 1.0 - -1.0 -1.0 - -1.0 0.0 - 0.0 -1.0 - ] - h = [1, 0.7, 0.7, -1, 0, 0] - qp_test_with_solutions( - Ipopt.Optimizer; - Q = Q, - q = q, - G = G, - h = h, - dzb = ones(2), - dQf = [1 -1; -1 1.0], - dqf = [1, -1.0], - dGf = ones(6, 2), - dhf = ones(6), - # Expected solutions - z = [0.3, 0.7], - ) -end - -@testset "Differentiating trivial QP 1" begin - Q = [ - 4.0 1.0 - 1.0 2.0 - ] - q = [1.0, 1.0] - G = [1.0 1.0] - h = [-1.0] - - model = DiffOpt.diff_optimizer(Ipopt.Optimizer) - MOI.set(model, MOI.Silent(), true) - x = MOI.add_variables(model, 2) - - qp_test_with_solutions( - Ipopt.Optimizer; - Q = Q, - q = q, - G = G, - h = h, - dzb = ones(2), - dQf = -ones(2, 2), - dqf = ones(2), - dGf = ones(1, 2), - dhf = -ones(1), - # Expected solutions - z = [-0.25; -0.75], - dhb = ones(1), - ) -end - -# @testset "Differentiating a non-convex QP" begin -# Q = [0.0 0.0; 1.0 2.0] -# q = [1.0; 1.0] -# G = [1.0 1.0;] -# h = [-1.0;] - -# model = DiffOpt.diff_optimizer(Ipopt.Optimizer) -# x = MOI.add_variables(model, 2) - -# # define objective -# quad_terms = MOI.ScalarQuadraticTerm{Float64}[] -# for i in 1:2 -# for j in i:2 # indexes (i,j), (j,i) will be mirrored. 
specify only one kind -# push!( -# quad_terms, -# MOI.ScalarQuadraticTerm(Q[i,j], x[i], x[j]), -# ) -# end -# end - -# objective_function = MOI.ScalarQuadraticFunction( -# quad_terms, -# MOI.ScalarAffineTerm.(q, x), -# 0.0, -# ) -# MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), objective_function) -# MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - -# # add constraint -# MOI.add_constraint( -# model, -# MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(G[1, :], x), 0.0), -# MOI.LessThan(h[1]), -# ) - -# @test_throws ErrorException MOI.optimize!(model) # should break -# end - -@testset "Differentiating QP with inequality and equality constraints" begin - # refered from: https://www.mathworks.com/help/optim/ug/quadprog.html#d120e113424 - # Find equivalent qpth program here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/DiffOpt_tests_4_py.ipynb - - Q = [ - 1.0 -1.0 1.0 - -1.0 2.0 -2.0 - 1.0 -2.0 4.0 - ] - q = [2.0, -3.0, 1.0] - G = [ - 0.0 0.0 1.0 - 0.0 1.0 0.0 - 1.0 0.0 0.0 - 0.0 0.0 -1.0 - 0.0 -1.0 0.0 - -1.0 0.0 0.0 - ] - h = [1.0, 1.0, 1.0, 0.0, 0.0, 0.0] - A = [1.0 1.0 1.0;] - b = [0.5] - - qp_test_with_solutions( - Ipopt.Optimizer; - Q = Q, - q = q, - G = G, - h = h, - A = A, - b = b, - dzb = ones(3), - dQf = ones(3, 3), - dqf = ones(3), - dGf = ones(6, 3), - dhf = ones(6), - dAf = ones(1, 3), - dbf = ones(1), - # Expected solutions - z = [0.0, 0.5, 0.0], - dQb = zeros(3, 3), - dqb = zeros(3), - dGb = zeros(6, 3), - dhb = zeros(6), - dAb = [0.0 -0.5 0.0], - dbb = [1.0], - ) -end - -# refered from https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contquadratic.jl#L3 -# Find equivalent CVXPYLayers and QPTH code here: -# https://github.com/AKS1996/jump-gsoc-2020/blob/master/DiffOpt_tests_1_py.ipynb -@testset "Differentiating MOI examples 1" begin - # homogeneous quadratic objective - # Min x^2 + xy + y^2 + yz + z^2 - # st x + 2y + 3z >= 4 (c1) - # x + y >= 1 (c2) - # x, y, z \in R - - Q = [ - 2.0 1.0 0.0 - 1.0 2.0 1.0 - 0.0 1.0 2.0 - ] - q = zeros(3) - G = [ - -1.0 -2.0 -3.0 - -1.0 -1.0 0.0 - ] - h = [-4.0, -1.0] - - dQ = [ - -0.12244895 0.01530609 -0.11224488 - 0.01530609 0.09183674 0.07653058 - -0.11224488 0.07653058 -0.06122449 - ] - dq = [-0.2142857, 0.21428567, -0.07142857] - - dG = [ - 0.05102692 0.30612244 0.25510856 - 0.06120519 0.36734693 0.30610315 - ] - dh = [-0.35714284; -0.4285714] - - qp_test_with_solutions( - Ipopt.Optimizer; - Q = Q, - q = q, - G = G, - h = h, - dzb = ones(3), - dQf = dQ, - dqf = dq, - dGf = dG, - dhf = dh, - # Expected solutions - dQb = dQ, - dqb = dq, - dGb = dG, - dhb = dh, - ) -end - -# refered from https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contquadratic.jl#L3 -# Find equivalent CVXPYLayers and QPTH code here: -# https://github.com/AKS1996/jump-gsoc-2020/blob/master/DiffOpt_tests_2_py.ipynb -@testset "Differentiating MOI examples 2" begin - # non-homogeneous quadratic objective - # minimize 2 x^2 + y^2 + xy + x + y - # s.t. 
x, y >= 0 - # x + y = 1 - - Q = [ - 4 1.0 - 1 2 - ] - q = [1, 1.0] - G = [ - -1 0.0 - 0 -1 - ] - h = [0, 0.0] - A = [1 1.0] - b = [1.0] - - dQ = [ - -0.05 -0.05 - -0.05 0.15 - ] - dq = [-0.2, 0.2] - dG = zeros(2, 2) - dh = zeros(2) - dA = [0.375 -1.075] - db = [0.7] - - qp_test_with_solutions( - Ipopt.Optimizer; - Q = Q, - q = q, - G = G, - h = h, - A = A, - b = b, - dzb = [1.3, 0.5], - dQf = dQ, - dqf = dq, - dGf = dG, - dhf = dh, - dAf = dA, - dbf = db, - # Expected solutions - dQb = dQ, - dqb = dq, - dGb = dG, - dhb = dh, - dAb = dA, - dbb = db, - z = [0.25, 0.75], - dzf = [1.4875, -0.075], - ∇zf = [-1.28125, 3.25625], - ∇zb = [-0.2, 0.2], - λ = zeros(2), - dλf = zeros(2), - dλb = zeros(2), - ∇λb = [0.8, -0.8 / 3], - ν = [-2.75], - dνb = zeros(1), - ∇νb = [-0.7], - ) -end - -@testset "Differentiating non trivial convex QP MOI" begin - nz = 10 - nineq_le = 25 - neq = 10 - - # read matrices from files - names = ["P", "q", "G", "h", "A", "b"] - matrices = [] - - for name in names - push!( - matrices, - DelimitedFiles.readdlm( - joinpath( - dirname(dirname(pathof(DiffOpt))), - "test", - "data", - name * ".txt", - ), - ' ', - Float64, - '\n', - ), - ) - end - - Q, q, G, h, A, b = matrices - q = vec(q) - h = vec(h) - b = vec(b) - - # read gradients from files - names = ["dP", "dq", "dG", "dh", "dA", "db"] - grads_actual = [] - - for name in names - push!( - grads_actual, - DelimitedFiles.readdlm( - joinpath(@__DIR__, "data", name * ".txt"), - ' ', - Float64, - '\n', - ), - ) - end - - dqb = vec(grads_actual[2]) - dhb = vec(grads_actual[4]) - dbb = vec(grads_actual[6]) - - qp_test( - Ipopt.Optimizer, - true, - true, - true; - Q = Q, - q = q, - G = G, - h = h, - A = A, - b = b, - dzb = ones(nz), - dQf = ones(nz, nz), - dqf = ones(nz), - dGf = ones(length(h), nz), - dhf = ones(length(h)), - dAf = ones(length(b), nz), - dbf = ones(length(b)), - # Expected solutions - dqb = dqb, - dhb = dhb, - dbb = dbb, - atol = 1e-3, # The values in `data` seems to have low accuracy - rtol = 1e-3, # The values in `data` seems to have low accuracy - ) -end - -@testset "Differentiating LP; checking gradients for non-active contraints" begin - # Issue #40 from Gurobi.jl - # min x - # s.t. x >= 0 - # x >= 3 - nz = 1 - qp_test_with_solutions( - HiGHS.Optimizer; - q = ones(nz), - G = -ones(2, nz), - h = [0.0, -3.0], - dzb = ones(nz), - dqf = ones(nz), - # Expected solutions - dGb = [0.0, 3.0], - dhb = [0.0, -1.0], - ) -end - -@testset "Differentiating a simple LP with GreaterThan constraint" begin - # this is canonically same as above test - # min x - # s.t. x >= 3 - nz = 1 - qp_test_with_solutions( - Ipopt.Optimizer; - q = ones(nz), - G = -ones(1, nz), - h = [-3.0], - dzb = ones(nz), - dqf = ones(nz), - # Expected solutions - dGb = [3.0], - dhb = [-1.0], - ) -end - -@testset "Differentiating LP; checking gradients for non-active contraints" begin - # refered from - https://en.wikipedia.org/wiki/Simplex_algorithm#Example - - # max 2x + 3y + 4z - # s.t. 3x+2y+z <= 10 - # 2x+5y+3z <= 15 - # x,y,z >= 0 - nz = 3 - qp_test_with_solutions( - SCS.Optimizer; - q = [-2.0, -3.0, -4.0], - G = [ - 3.0 2.0 1.0 - 2.0 5.0 3.0 - -1.0 0.0 0.0 - 0.0 -1.0 0.0 - 0.0 0.0 -1.0 - ], - h = [10.0, 15.0, 0.0, 0.0, 0.0], - dzb = ones(nz), - dqf = ones(nz), - # Expected solutions - dqb = zeros(nz), - dGb = [ - 0.0 0.0 0.0 - 0.0 0.0 -5/3 - 0.0 0.0 5/3 - 0.0 0.0 -10/3 - 0.0 0.0 0.0 - ], - dhb = [0.0, 1 / 3, -1 / 3, 2 / 3, 0.0], - ) -end - -@testset "Differentiating LP with variable bounds" begin - # max 2x + 3y + 4z - # s.t. 
3x+2y+z <= 10 - # 2x+5y+3z <= 15 - # x ≤ 3 - # 0 ≤ y ≤ 2 - # z ≥ 2 - # x,y,z >= 0 - # variant of previous test with same solution - nz = 3 - qp_test_with_solutions( - HiGHS.Optimizer; - q = [-2.0, -3.0, -4.0], - G = [ - 3.0 2.0 1.0 - 2.0 5.0 3.0 - -1.0 0.0 0.0 - 0.0 -1.0 0.0 - 0.0 0.0 -1.0 - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 1.0 - ], - h = [10.0, 15.0, 0.0, 0.0, 0.0, 3.0, 2.0, 6.0], - dzb = ones(nz), - dqf = ones(nz), - # Expected solutions - dqb = zeros(nz), - dGb = [ - 0.0 0.0 0.0 - 0.0 0.0 -5/3 - 0.0 0.0 5/3 - 0.0 0.0 -10/3 - 0.0 0.0 0.0 - 0.0 0.0 0.0 - 0.0 0.0 0.0 - 0.0 0.0 0.0 - ], - dhb = [0.0, 1 / 3, -1 / 3, 2 / 3, 0.0, 0.0, 0.0, 0.0], - ) -end - -@testset "Differentiating LP with variable bounds 2" begin - nz = 3 - qp_test_with_solutions( - HiGHS.Optimizer; - q = [-2.0, -3.0, -4.0], - G = [ - 3.0 2.0 1.0 - 2.0 5.0 3.0 - 0.0 -1.0 0.0 - 0.0 0.0 -1.0 - ], - h = [10.0, 15.0, 0.0, 0.0], - fix_indices = [1], - fix_values = [0.0], - dzb = ones(nz), - dqf = ones(nz), - # Expected solutions - dqb = zeros(nz), - dGb = [ - 0.0 0.0 0.0 - 0.0 0.0 -5/3 - 0.0 0.0 -10/3 - 0.0 0.0 0.0 - ], - dhb = [0.0, 1 / 3, 2 / 3, 0.0], - dAb = [0.0 0.0 -5 / 3], - dbb = [1 / 3], - ) -end - -@testset "Differentiating LP with SAF, SV with LE, GE constraints" begin - """ - max 2x + 3y + 4z - s.t. 3x+2y+z <= 10 - 2x+5y+3z <= 15 - x ≤ 3 - 0 ≤ y ≤ 2 - 6 ≥ z ≥ -1 - x, y, z >= 0 - variant of previous test with same solution - """ - nz = 3 - qp_test_with_solutions( - HiGHS.Optimizer; - q = [-2.0, -3.0, -4.0], - G = [ - 3.0 2.0 1.0 - 2.0 5.0 3.0 - -1.0 0.0 0.0 - 0.0 -1.0 0.0 - 0.0 0.0 -1.0 - ], - h = [10.0, 15.0, 0.0, 0.0, 0.0], #5 - ub_indices = [1, 2, 3], - ub_values = [3.0, 2.0, 6.0], - lb_indices = [1], - lb_values = [-1.0], - dzb = ones(nz), - dqb = zeros(nz), - dGb = [ - 0.0 0.0 0.0 - 0.0 0.0 -5/3 - 0.0 0.0 5/3 - 0.0 0.0 -10/3 - 0.0 0.0 0.0 - 0.0 0.0 0.0 - 0.0 0.0 0.0 - 0.0 0.0 0.0 - 0.0 0.0 0.0 - ], - dhb = [0.0, 1 / 3, -1 / 3, 2 / 3, 0.0, 0.0, 0.0, 0.0, 0.0], - ) -end - -# TODO: split file here -# above is QP Back -# below is conic forw - -function test_simple_socp(eq_vec::Bool) - # referred from _soc2test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L1355 - # find equivalent diffcp python program here: https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_socp_1_py.ipynb - - # Problem SOC2 - # min x - # s.t. y ≥ 1/√2 - # x² + y² ≤ 1 - # in conic form: - # min x - # s.t. 
-1/√2 + y ∈ R₊ - # 1 - t ∈ {0} - # (t,x,y) ∈ SOC₃ - - model = DiffOpt.diff_optimizer(SCS.Optimizer) - MOI.set(model, MOI.Silent(), true) - x, y, t = MOI.add_variables(model, 3) - - MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), - 1.0x, - ) - MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - - if eq_vec - ceq = MOI.add_constraint( - model, - MOI.Utilities.vectorize([-1.0t + 1.0]), - MOI.Zeros(1), - ) - else - ceq = MOI.add_constraint(model, -1.0t, MOI.EqualTo(-1.0)) - end - cnon = MOI.add_constraint( - model, - MOI.Utilities.vectorize([1.0y - 1 / √2]), - MOI.Nonnegatives(1), - ) - csoc = MOI.add_constraint( - model, - MOI.Utilities.vectorize([1.0t, 1.0x, 1.0y]), - MOI.SecondOrderCone(3), - ) - - MOI.optimize!(model) - - if eq_vec - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - ceq, - MOI.Utilities.vectorize([1.0 * x]), - ) - else - MOI.set(model, DiffOpt.ForwardConstraintFunction(), ceq, 1.0 * x) - end - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - cnon, - MOI.Utilities.vectorize([1.0 * y]), - ) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - csoc, - MOI.Utilities.operate(vcat, Float64, 1.0 * t, 0.0, 0.0), - ) - - DiffOpt.forward_differentiate!(model) - - # these matrices are benchmarked with the output generated by diffcp - # refer the python file mentioned above to get equivalent python source code - @test model.diff.model.x ≈ [-1 / √2; 1 / √2; 1.0] atol = ATOL rtol = RTOL - if eq_vec - @test model.diff.model.s ≈ [0.0, 0.0, 1.0, -1 / √2, 1 / √2] atol = ATOL rtol = - RTOL - @test model.diff.model.y ≈ [√2, 1.0, √2, 1.0, -1.0] atol = ATOL rtol = - RTOL - else - @test model.diff.model.s ≈ [0.0, 1.0, -1 / √2, 1 / √2, 0.0] atol = ATOL rtol = - RTOL - @test model.diff.model.y ≈ [1.0, √2, 1.0, -1.0, √2] atol = ATOL rtol = - RTOL - end - - dx = [1.12132144; 1 / √2; 1 / √2] - for (i, vi) in enumerate([x, y, t]) - @test dx[i] ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = - ATOL rtol = RTOL - end - # @test dx ≈ [1.12132144; 1/√2; 1/√2] atol=ATOL rtol=RTOL - # @test ds ≈ [0.0; 0.0; -2.92893438e-01; 1.12132144e+00; 7.07106999e-01] atol=ATOL rtol=RTOL - # @test dy ≈ [2.4142175; 5.00000557; 3.8284315; √2; -4.00000495] atol=ATOL rtol=RTOL -end - -@testset "Differentiating simple SOCP" begin - for eq_vec in [false, true] - test_simple_socp(eq_vec) - end -end - -# refered from _psd0test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L3919 -# find equivalent diffcp program here: https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_1_py.ipynb - -# min X[1,1] + X[2,2] max y -# X[2,1] = 1 [0 y/2 [ 1 0 -# y/2 0 <= 0 1] -# X >= 0 y free -# Optimal solution: -# -# ⎛ 1 1 ⎞ -# X = ⎜ ⎟ y = 2 -# ⎝ 1 1 ⎠ -function simple_psd(solver) - model = DiffOpt.diff_optimizer(solver) - MOI.set(model, MOI.Silent(), true) - X = MOI.add_variables(model, 3) - vov = MOI.VectorOfVariables(X) - cX = MOI.add_constraint( - model, - VAF(vov), - MOI.PositiveSemidefiniteConeTriangle(2), - ) - - c = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - [MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(1.0, X[2]))], - [-1.0], - ), - MOI.Zeros(1), - ) - - MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), - 1.0 * X[1] + 1.0 * X[end], - ) - MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - - MOI.optimize!(model) - - x = MOI.get(model, MOI.VariablePrimal(), X) - - cone_types = unique([ - S for (F, S) in - MOI.get(model.optimizer, MOI.ListOfConstraintTypesPresent()) - ]) - 
conic_form = DiffOpt.ConicProgram.Form{Float64}() - cones = conic_form.constraints.sets - DiffOpt.set_set_types(cones, cone_types) - index_map = MOI.copy_to(conic_form, model) - - # s = DiffOpt.map_rows((ci, r) -> MOI.get(model.optimizer, MOI.ConstraintPrimal(), ci), model.optimizer, cones, index_map, DiffOpt.Flattened{Float64}()) - # y = DiffOpt.map_rows((ci, r) -> MOI.get(model.optimizer, MOI.ConstraintDual(), ci), model.optimizer, cones, index_map, DiffOpt.Flattened{Float64}()) - - @test x ≈ ones(3) atol = ATOL rtol = RTOL - # @test s ≈ [0.0; ones(3)] atol=ATOL rtol=RTOL - # @test y ≈ [2.0, 1.0, -1.0, 1.0] atol=ATOL rtol=RTOL - - # test1: changing the constant in `c`, i.e. changing value of X[2] - MOI.set(model, DiffOpt.ForwardConstraintFunction(), c, _vaf([1.0])) - - DiffOpt.forward_differentiate!(model) - - dx = -ones(3) - for (i, vi) in enumerate(X) - @test dx[i] ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = - ATOL rtol = RTOL - end - - # @test dx ≈ -ones(3) atol=ATOL rtol=RTOL # will change the value of other 2 variables - # @test ds[2:4] ≈ -ones(3) atol=ATOL rtol=RTOL # will affect PSD constraint too - - # test2: changing X[1], X[3] but keeping the objective (their sum) same - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c, - MOI.Utilities.zero_with_output_dimension( - MOI.VectorAffineFunction{Float64}, - 1, - ), - ) - MOI.set(model, DiffOpt.ForwardObjectiveFunction(), -1.0X[1] + 1.0X[3]) +import IterativeSolvers +import MathOptInterface as MOI - DiffOpt.forward_differentiate!(model) +const ATOL = 2e-4 +const RTOL = 2e-4 - # @test dx ≈ [1.0, 0.0, -1.0] atol=ATOL rtol=RTOL # note: no effect on X[2] - dx = [1.0, 0.0, -1.0] - for (i, vi) in enumerate(X) - @test dx[i] ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = - ATOL rtol = RTOL +function runtests() + for name in names(@__MODULE__; all = true) + if startswith("$name", "test_") + @testset "$(name)" begin + getfield(@__MODULE__, name)() + end + end end + return end -@testset "Differentiating simple PSD program" begin - simple_psd(SCS.Optimizer) -end - -@testset "Differentiating conic with PSD and SOC constraints" begin - # similar to _psd1test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L4054 - # find equivalent diffcp example here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_2_py.ipynb - - # | 2 1 0 | - # min | 1 2 1 | . X + x1 - # | 0 1 2 | - # - # - # s.t. | 1 0 0 | - # | 0 1 0 | . X + x1 = 1 - # | 0 0 1 | - # - # | 1 1 1 | - # | 1 1 1 | . X + x2 + x3 = 1/2 - # | 1 1 1 | - # - # (x1,x2,x3) in C^3_q - # X in C_psd - # - # The dual is - # max y1 + y2/2 - # - # s.t. 
| y1+y2 y2 y2 | - # | y2 y1+y2 y2 | in C_psd - # | y2 y2 y1+y2 | - # - # (1-y1, -y2, -y2) in C^3_q - - model = DiffOpt.diff_optimizer(SCS.Optimizer) - MOI.set(model, MOI.Silent(), true) - - δ = √(1 + (3 * √2 + 2) * √(-116 * √2 + 166) / 14) / 2 - ε = √((1 - 2 * (√2 - 1) * δ^2) / (2 - √2)) - y2 = 1 - ε * δ - y1 = 1 - √2 * y2 - obj = y1 + y2 / 2 - k = -2 * δ / ε - x2 = ((3 - 2obj) * (2 + k^2) - 4) / (4 * (2 + k^2) - 4 * √2) - α = √(3 - 2obj - 4x2) / 2 - β = k * α - - X = MOI.add_variables(model, 6) - x = MOI.add_variables(model, 3) - - vov = MOI.VectorOfVariables(X) - cX = MOI.add_constraint( - model, - VAF(vov), - MOI.PositiveSemidefiniteConeTriangle(3), - ) - cx = MOI.add_constraint( - model, - VAF(MOI.VectorOfVariables(x)), - MOI.SecondOrderCone(3), - ) - - c1 = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - MOI.VectorAffineTerm.( - 1:1, - MOI.ScalarAffineTerm.( - [1.0, 1.0, 1.0, 1.0], - [X[1], X[3], X[end], x[1]], - ), - ), - [-1.0], - ), - MOI.Zeros(1), - ) - c2 = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - MOI.VectorAffineTerm.( - 1:1, - MOI.ScalarAffineTerm.( - [1.0, 2, 1, 2, 2, 1, 1, 1], - [X; x[2]; x[3]], - ), - ), - [-0.5], - ), - MOI.Zeros(1), +function test_moi_test_runtests() + model = DiffOpt.diff_optimizer(HiGHS.Optimizer) + # `Variable.ZerosBridge` makes dual needed by some tests fail. + MOI.Bridges.remove_bridge( + model.optimizer.optimizer, + MOI.Bridges.Variable.ZerosBridge{Float64}, ) - - # this is a useless constraint - refer the tests below - # even if we comment this, it won't affect the optimal values - c_extra = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - MOI.VectorAffineTerm.(1:1, MOI.ScalarAffineTerm.(ones(3), x)), - [100.0], - ), - MOI.Nonnegatives(1), - ) - - objXidx = [1:3; 5:6] - objXcoefs = 2 * ones(5) - MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), - MOI.ScalarAffineFunction( - MOI.ScalarAffineTerm.([objXcoefs; 1.0], [X[objXidx]; x[1]]), - 0.0, - ), - ) - MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - - MOI.optimize!(model) - - _x = MOI.get(model, MOI.VariablePrimal(), x) - _X = MOI.get(model, MOI.VariablePrimal(), X) - - cone_types = unique([ - S for (F, S) in - MOI.get(model.optimizer, MOI.ListOfConstraintTypesPresent()) - ]) - conic_form = DiffOpt.ConicProgram.Form{Float64}() - cones = conic_form.constraints.sets - DiffOpt.set_set_types(cones, cone_types) - index_map = MOI.copy_to(conic_form, model) - - # s = DiffOpt.map_rows((ci, r) -> MOI.get(model.optimizer, MOI.ConstraintPrimal(), ci), model.optimizer, cones, index_map, DiffOpt.Flattened{Float64}()) - # y = DiffOpt.map_rows((ci, r) -> MOI.get(model.optimizer, MOI.ConstraintDual(), ci), model.optimizer, cones, index_map, DiffOpt.Flattened{Float64}()) - - @test _X ≈ [ - 0.21725121 - -0.25996907 - 0.31108582 - 0.21725009 - -0.25996907 - 0.21725121 - ] atol = ATOL rtol = RTOL - @test _x ≈ [0.2544097; 0.17989425; 0.17989425] atol = ATOL rtol = RTOL - - # @test x ≈ [ 0.21725121; -0.25996907; 0.31108582; 0.21725009; -0.25996907; 0.21725121; - # 0.2544097; 0.17989425; 0.17989425] atol=ATOL rtol=RTOL - # @test s ≈ [ - # 0.0, 0.0, 100.614, - # 0.254408, 0.179894, 0.179894, - # 0.217251, -0.25997, 0.31109, 0.217251, -0.25997, 0.217251, - # ] atol=ATOL rtol=RTOL - # TODO: it should be 100, not 100.614, its surely a residual error - # Joaquim: it is not an error it is sum(_x) - # there seemss to be an issue with this test (note the low precision) - # - # @test y ≈ [ - # 0.544758, 0.321905, 0.0, - # 0.455242, -0.321905, -0.321905, - # 
1.13334, 0.678095, 1.13334, -0.321905, 0.678095, 1.13334, - # ] atol=ATOL rtol=RTOL - - # test c_extra - MOI.set(model, DiffOpt.ForwardConstraintFunction(), c_extra, _vaf([1.0])) - - DiffOpt.forward_differentiate!(model) - - # a small change in the constant in c_extra should not affect any other variable or constraint other than c_extra itself - for (i, vi) in enumerate(X) - @test 0.0 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = - 1e-2 rtol = RTOL - end - for (i, vi) in enumerate(x) - @test 0.0 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = - 1e-2 rtol = RTOL - end - # @test dx ≈ zeros(9) atol=1e-2 - # @test dy ≈ zeros(12) atol=0.012 - # @test [ds[1:2]; ds[4:end]] ≈ zeros(11) atol=1e-2 - # @test ds[3] ≈ 1.0 atol=1e-2 # except c_extra itself -end - -@testset "Differentiating conic with PSD and POS constraints" begin - # refer psdt2test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L4306 - # find equivalent diffcp program here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_3_py.ipynb - - model = DiffOpt.diff_optimizer(SCS.Optimizer) MOI.set(model, MOI.Silent(), true) - - x = MOI.add_variables(model, 7) - @test MOI.get(model, MOI.NumberOfVariables()) == 7 - - η = 10.0 - - c1 = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - MOI.VectorAffineTerm.(1, MOI.ScalarAffineTerm.(-1.0, x[1:6])), - [η], - ), - MOI.Nonnegatives(1), - ) - c2 = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - MOI.VectorAffineTerm.(1:6, MOI.ScalarAffineTerm.(1.0, x[1:6])), - zeros(6), - ), - MOI.Nonnegatives(6), - ) - α = 0.8 - δ = 0.9 - c3 = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - MOI.VectorAffineTerm.( - [fill(1, 7); fill(2, 5); fill(3, 6)], - MOI.ScalarAffineTerm.( - [ - δ / 2, - α, - δ, - δ / 4, - δ / 8, - 0.0, - -1.0, - -δ / (2 * √2), - -δ / 4, - 0, - -δ / (8 * √2), - 0.0, - δ / 2, - δ - α, - 0, - δ / 8, - δ / 4, - -1.0, - ], - [x[1:7]; x[1:3]; x[5:6]; x[1:3]; x[5:7]], - ), - ), - zeros(3), - ), - MOI.PositiveSemidefiniteConeTriangle(2), - ) - c4 = MOI.add_constraint( - model, - MOI.VectorAffineFunction( - MOI.VectorAffineTerm.( - 1, - MOI.ScalarAffineTerm.(0.0, [x[1:3]; x[5:6]]), - ), - [0.0], - ), - MOI.Zeros(1), - ) - - MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), - MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1.0, x[7])], 0.0), - ) - MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE) - - MOI.optimize!(model) - - # dc = ones(7) - MOI.set( - model, - DiffOpt.ForwardObjectiveFunction(), - MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(ones(7), x), 0.0), - ) - # db = ones(11) - # dA = ones(11, 7) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c1, - MOI.Utilities.vectorize(ones(1, 7) * x + ones(1)), - ) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c2, - MOI.Utilities.vectorize(ones(6, 7) * x + ones(6)), - ) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c3, - MOI.Utilities.vectorize(ones(3, 7) * x + ones(3)), - ) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c4, - MOI.Utilities.vectorize(ones(1, 7) * x + ones(1)), - ) - - DiffOpt.forward_differentiate!(model) - - @test model.diff.model.x ≈ - [20 / 3.0, 0.0, 10 / 3.0, 0.0, 0.0, 0.0, 1.90192379] atol = ATOL rtol = - RTOL - @test model.diff.model.s ≈ [ - 0.0, - 0.0, - 20 / 3.0, - 0.0, - 10 / 3.0, - 0.0, - 0.0, - 0.0, - 4.09807621, - -2.12132, - 1.09807621, - ] atol = ATOL rtol = RTOL - @test model.diff.model.y ≈ [ - 0.0, - 0.19019238, - 0.0, - 
0.12597667, - 0.0, - 0.14264428, - 0.14264428, - 0.01274047, - 0.21132487, - 0.408248, - 0.78867513, - ] atol = ATOL rtol = RTOL - - atol = 0.3 - rtol = 0.01 - - # compare these with https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_3_py.ipynb - # results are not exactly as: 1. there is some residual error 2. diffcp results are SCS specific, hence scaled - dx = [-39.6066, 10.8953, -14.9189, 10.9054, 10.883, 10.9118, -21.7508] - for (i, vi) in enumerate(x) - @test dx[i] ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), vi) atol = - atol rtol = rtol - end - # @test dy ≈ [0.0, -3.56905, 0.0, -0.380035, 0.0, -0.41398, -0.385321, -0.00743119, -0.644986, -0.550542, -2.36765] atol=atol rtol=rtol - # @test ds ≈ [0.0, 0.0, -50.4973, 0.0, -25.8066, 0.0, 0.0, 0.0, -7.96528, -1.62968, -2.18925] atol=atol rtol=rtol - - # TODO: future example, how to differentiate wrt a specific constraint/variable, refer QPLib article for more - dA = zeros(11, 7) - dA[3:8, 1:6] = Matrix{Float64}(LinearAlgebra.I, 6, 6) # differentiating only wrt POS constraint c2 - db = zeros(11) - dc = zeros(7) - - # db = zeros(11) - # dA = zeros(11, 7) - # dA[3:8, 1:6] = Matrix{Float64}(LinearAlgebra.I, 6, 6) # differentiating only wrt POS constraint c2 - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c1, - MOI.Utilities.zero_with_output_dimension(VAF, 1), - ) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c2, - MOI.Utilities.vectorize(ones(6) .* x[1:6]), - ) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c3, - MOI.Utilities.zero_with_output_dimension(VAF, 3), - ) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c4, - MOI.Utilities.zero_with_output_dimension(VAF, 1), - ) - - DiffOpt.forward_differentiate!(model) - - # for (i, vi) in enumerate(X) - # @test 0.0 ≈ MOI.get(model, - # DiffOpt.ForwardVariablePrimal(), vi) atol=1e-2 rtol=RTOL - # end - - # TODO add a test here, probably on duals - - # # note that there's no change in the PSD slack values or dual optimas - # @test dy ≈ [0.0, 0.0, 0.0, 0.125978, 0.0, 0.142644, 0.142641, 0.0127401, 0.0, 0.0, 0.0] atol=atol rtol=RTOL - # @test ds ≈ [0.0, 0.0, -6.66672, 0.0, -3.33336, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] atol=atol rtol=RTOL + config = MOI.Test.Config(; atol = 1e-7) + MOI.Test.runtests(model, config) + return end -@testset "Differentiating a simple PSD" begin - # refer _psd3test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L4484 - # find equivalent diffcp program here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/diffcp_sdp_0_py.ipynb - - # min x - # s.t. 
[x 1 1] - # [1 x 1] ⪰ 0 - # [1 1 x] - - model = DiffOpt.diff_optimizer(SCS.Optimizer) +function test_FEASIBILITY_SENSE_zeros_objective() + model = DiffOpt.diff_optimizer(HiGHS.Optimizer) MOI.set(model, MOI.Silent(), true) - x = MOI.add_variable(model) - - func = MOI.Utilities.operate( - vcat, - Float64, - x, - one(Float64), - x, - one(Float64), - one(Float64), - x, - ) - - # do not confuse this constraint with the matrix `c` in the conic form (of the matrices A, b, c) - c = MOI.add_constraint(model, func, MOI.PositiveSemidefiniteConeTriangle(3)) - - MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), - MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, [x]), 0.0), - ) + MOI.add_constraint(model, x, MOI.GreaterThan(1.0)) MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - + MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x) MOI.optimize!(model) - - # SCS/Mosek specific - # @test s' ≈ [1. 1.41421356 1.41421356 1. 1.41421356 1. ] atol=ATOL rtol=RTOL - # @test y' ≈ [ 0.33333333 -0.23570226 -0.23570226 0.33333333 -0.23570226 0.33333333] atol=ATOL rtol=RTOL - - # dA = zeros(6, 1) - # db = ones(6) - MOI.set(model, DiffOpt.ForwardConstraintFunction(), c, _vaf(ones(6))) - # dc = zeros(1) - - DiffOpt.forward_differentiate!(model) - - @test model.diff.model.x ≈ [1.0] atol = 10ATOL rtol = 10RTOL - @test model.diff.model.s ≈ ones(6) atol = ATOL rtol = RTOL - @test model.diff.model.y ≈ [1 / 3, -1 / 6, 1 / 3, -1 / 6, -1 / 6, 1 / 3] atol = - ATOL rtol = RTOL - - @test -0.5 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = - RTOL - - # @test dx ≈ [-0.5] atol=ATOL rtol=RTOL - # @test dy ≈ zeros(6) atol=ATOL rtol=RTOL - # @test ds ≈ [0.5, 1.0, 0.5, 1.0, 1.0, 0.5] atol=ATOL rtol=RTOL - - # test 2 - dA = zeros(6, 1) - db = zeros(6) - MOI.set( - model, - DiffOpt.ForwardConstraintFunction(), - c, - MOI.Utilities.zero_with_output_dimension(VAF, 6), - ) - MOI.set(model, DiffOpt.ForwardObjectiveFunction(), 1.0 * x) - - DiffOpt.forward_differentiate!(model) - - @test 0.0 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = - RTOL - - # @test dx ≈ zeros(1) atol=ATOL rtol=RTOL - # @test dy ≈ [0.333333, -0.333333, 0.333333, -0.333333, -0.333333, 0.333333] atol=ATOL rtol=RTOL - # @test ds ≈ zeros(6) atol=ATOL rtol=RTOL -end - -@testset "Verifying cache after differentiating a QP" begin - Q = [ - 4.0 1.0 - 1.0 2.0 - ] - q = [1.0, 1.0] - G = [1.0 1.0] - h = [-1.0] - - model = DiffOpt.diff_optimizer(SCS.Optimizer) - MOI.set(model, MOI.Silent(), true) - x = MOI.add_variables(model, 2) - - # define objective - quad_terms = MOI.ScalarQuadraticTerm{Float64}[] - for i in 1:2 - for j in i:2 # indexes (i,j), (j,i) will be mirrored. 
specify only one kind - push!(quad_terms, MOI.ScalarQuadraticTerm(Q[i, j], x[i], x[j])) - end - end - objective_function = MOI.ScalarQuadraticFunction( - quad_terms, - MOI.ScalarAffineTerm.(q, x), - 0.0, - ) - MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), - objective_function, - ) - - # add constraint - c = MOI.add_constraint( - model, - MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(G[1, :], x), 0.0), - MOI.LessThan(h[1]), - ) + @test MOI.get(model, MOI.ObjectiveValue()) ≈ 1.0 atol = ATOL rtol = RTOL + MOI.set(model, MOI.ObjectiveSense(), MOI.FEASIBILITY_SENSE) MOI.optimize!(model) - - x_sol = MOI.get(model, MOI.VariablePrimal(), x) - @test x_sol ≈ [-0.25, -0.75] atol = ATOL rtol = RTOL - - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) - - @test model.diff === nothing - DiffOpt.reverse_differentiate!(model) - - grad_wrt_h = - MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) - @test grad_wrt_h ≈ -1.0 atol = 2ATOL rtol = RTOL - - # adding two variables invalidates the cache - y = MOI.add_variables(model, 2) - MOI.delete(model, y) - - @test model.diff === nothing - MOI.optimize!(model) - - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) - DiffOpt.reverse_differentiate!(model) - - grad_wrt_h = - MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) - @test grad_wrt_h ≈ -1.0 atol = 2ATOL rtol = RTOL - - # adding single variable invalidates the cache - y0 = MOI.add_variable(model) - @test model.diff === nothing - MOI.add_constraint( - model, - MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1.0, y0)], 0.0), - MOI.EqualTo(42.0), - ) - - MOI.optimize!(model) - @test model.diff === nothing - - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) - DiffOpt.reverse_differentiate!(model) - grad_wrt_h = - MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) - @test grad_wrt_h ≈ -1.0 atol = 5e-3 rtol = RTOL - @test model.diff.model.gradient_cache isa DiffOpt.QuadraticProgram.Cache - - # adding constraint invalidates the cache - c2 = MOI.add_constraint( - model, - MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, 1.0], x), 0.0), - MOI.LessThan(0.0), - ) - @test model.diff === nothing - MOI.optimize!(model) - - MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) - DiffOpt.reverse_differentiate!(model) - grad_wrt_h = - MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) - @test grad_wrt_h ≈ -1.0 atol = 5e-3 rtol = RTOL - # second constraint inactive - grad_wrt_h = - MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c2)) - @test grad_wrt_h ≈ 0.0 atol = 5e-3 rtol = RTOL - @test model.diff.model.gradient_cache isa DiffOpt.QuadraticProgram.Cache + @test MOI.get(model, MOI.ObjectiveValue()) == 0.0 + return end -@testset "Verifying cache on a PSD" begin - model = DiffOpt.diff_optimizer(SCS.Optimizer) +function test_forward_or_reverse_without_optimizing_throws() + model = DiffOpt.diff_optimizer(HiGHS.Optimizer) MOI.set(model, MOI.Silent(), true) - x = MOI.add_variable(model) - - func = MOI.Utilities.operate(vcat, Float64, x, 1.0, x, 1.0, 1.0, x) - - c = MOI.add_constraint(model, func, MOI.PositiveSemidefiniteConeTriangle(3)) - - MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), - MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, [x]), 0.0), - ) + MOI.add_constraint(model, x, MOI.GreaterThan(1.0)) MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - @test 
model.diff === nothing + MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x) + # do not optimize, just try to differentiate + @test_throws ErrorException DiffOpt.forward_differentiate!(model) + @test_throws ErrorException DiffOpt.reverse_differentiate!(model) + # impossible constraint + MOI.add_constraint(model, x, MOI.LessThan(0.5)) MOI.optimize!(model) - @test model.diff === nothing - - dA = zeros(6, 1) - db = ones(6) - MOI.set(model, DiffOpt.ForwardConstraintFunction(), c, _vaf(ones(6))) - dc = zeros(1) - - DiffOpt.forward_differentiate!(model) - - @test model.diff.model.x ≈ [1.0] atol = 10ATOL rtol = 10RTOL - @test model.diff.model.s ≈ ones(6) atol = ATOL rtol = RTOL - @test model.diff.model.y ≈ [1 / 3, -1 / 6, 1 / 3, -1 / 6, -1 / 6, 1 / 3] atol = - ATOL rtol = RTOL - - @test -0.5 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = - RTOL - - @test model.diff.model.gradient_cache isa DiffOpt.ConicProgram.Cache - - DiffOpt.forward_differentiate!(model) - - @test -0.5 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = - RTOL - - # test 2 - MOI.set(model, DiffOpt.ForwardConstraintFunction(), c, _vaf(zeros(6))) - MOI.set(model, DiffOpt.ForwardObjectiveFunction(), 1.0 * x) - - DiffOpt.forward_differentiate!(model) - - @test 0.0 ≈ MOI.get(model, DiffOpt.ForwardVariablePrimal(), x) atol = 1e-2 rtol = - RTOL -end + @test_throws ErrorException DiffOpt.forward_differentiate!(model) + @test_throws ErrorException DiffOpt.reverse_differentiate!(model) + return +end + +struct TestSolver end + +# always use IterativeSolvers +function DiffOpt.QuadraticProgram.solve_system( + ::TestSolver, + LHS, + RHS, + iterative::Bool, +) + return IterativeSolvers.lsqr(LHS, RHS) +end + +function test_setting_the_linear_solver_in_the_quadratic_solver() + model = DiffOpt.QuadraticProgram.Model() + @test MOI.supports(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver()) + @test MOI.get(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver()) === + nothing + MOI.set(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver(), TestSolver()) + @test MOI.get(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver()) == + TestSolver() + MOI.empty!(model) + @test MOI.get(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver()) == + TestSolver() + return +end + +function _test_dU_from_dQ(U, dU) + dQ = dU'U + U'dU + _dU = copy(dQ) + __dU = copy(dQ) + # Compiling + DiffOpt.dU_from_dQ!(__dU, U) + @test @allocated(DiffOpt.dU_from_dQ!(_dU, U)) == 0 + @test _dU ≈ dU + return +end + +function test_dU_from_dQ() + _test_dU_from_dQ(2ones(1, 1), 3ones(1, 1)) + U = [1 2; 0 1] + dU = [1 -1; 0 1] + _test_dU_from_dQ(U, dU) + U = [-3 5; 0 2.5] + dU = [2 3.5; 0 -2] + _test_dU_from_dQ(U, dU) + U = [1.5 2 -1; 0 -1 3.5; 0 0 -2] + dU = [2.5 -1 2; 0 5 -3; 0 0 3] + _test_dU_from_dQ(U, dU) + return +end + +end # module + +TestMOIWrapper.runtests() diff --git a/test/qp_forward.jl b/test/qp_forward.jl deleted file mode 100644 index 0fd75a8c..00000000 --- a/test/qp_forward.jl +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2020: Akshay Sharma and contributors -# -# Use of this source code is governed by an MIT-style license that can be found -# in the LICENSE.md file or at https://opensource.org/licenses/MIT. - -import HiGHS -@testset "Differentiating LP; checking gradients for non-active contraints" begin - # Issue #40 from Gurobi.jl - # min x - # s.t. 
x >= 0 - # x >= 3 - qp_test_with_solutions( - HiGHS.Optimizer; - q = [1.0], - G = -ones(2, 1), - h = [0.0, -3.0], - dzb = -ones(1), - dhf = [0.0, 1.0], - # Expected solutions - z = [3.0], - λ = [0.0, 1.0], - dzf = -ones(1), - dλb = zeros(2), - dhb = [0.0, 1.0], - ∇zb = zeros(1), - ∇λb = [0.0, -1.0], - dλf = zeros(2), - ) -end diff --git a/test/quadratic_program.jl b/test/quadratic_program.jl new file mode 100644 index 00000000..2c3f4048 --- /dev/null +++ b/test/quadratic_program.jl @@ -0,0 +1,570 @@ +# Copyright (c) 2020: Akshay Sharma and contributors +# +# Use of this source code is governed by an MIT-style license that can be found +# in the LICENSE.md file or at https://opensource.org/licenses/MIT. + +module TestQuadraticProgram + +using Test +import DelimitedFiles +import DiffOpt +import HiGHS +import Ipopt +import MathOptInterface as MOI +import SCS + +const ATOL = 2e-4 +const RTOL = 2e-4 + +function runtests() + for name in names(@__MODULE__; all = true) + if startswith("$name", "test_") + @testset "$(name)" begin + getfield(@__MODULE__, name)() + end + end + end + return +end + +include(joinpath(@__DIR__, "utils.jl")) + +function test_forward_on_trivial_QP() + # using example on https://osqp.org/docs/examples/setup-and-solve.html + Q = [4.0 1.0; 1.0 2.0] + q = [1.0, 1.0] + G = [ + 1.0 1.0 + 1.0 0.0 + 0.0 1.0 + -1.0 -1.0 + -1.0 0.0 + 0.0 -1.0 + ] + h = [1, 0.7, 0.7, -1, 0, 0] + qp_test_with_solutions( + Ipopt.Optimizer; + Q = Q, + q = q, + G = G, + h = h, + dzb = ones(2), + dQf = [1 -1; -1 1.0], + dqf = [1, -1.0], + dGf = ones(6, 2), + dhf = ones(6), + # Expected solutions + z = [0.3, 0.7], + ) + return +end + +function test_differentiating_trivial_qp_1() + Q = [ + 4.0 1.0 + 1.0 2.0 + ] + q = [1.0, 1.0] + G = [1.0 1.0] + h = [-1.0] + + model = DiffOpt.diff_optimizer(Ipopt.Optimizer) + MOI.set(model, MOI.Silent(), true) + x = MOI.add_variables(model, 2) + + qp_test_with_solutions( + Ipopt.Optimizer; + Q = Q, + q = q, + G = G, + h = h, + dzb = ones(2), + dQf = -ones(2, 2), + dqf = ones(2), + dGf = ones(1, 2), + dhf = -ones(1), + # Expected solutions + z = [-0.25; -0.75], + dhb = ones(1), + ) + return +end + +# @testset "Differentiating a non-convex QP" begin +# Q = [0.0 0.0; 1.0 2.0] +# q = [1.0; 1.0] +# G = [1.0 1.0;] +# h = [-1.0;] +# +# model = DiffOpt.diff_optimizer(Ipopt.Optimizer) +# x = MOI.add_variables(model, 2) +# +# # define objective +# quad_terms = MOI.ScalarQuadraticTerm{Float64}[] +# for i in 1:2 +# for j in i:2 # indexes (i,j), (j,i) will be mirrored. 
specify only one kind +# push!( +# quad_terms, +# MOI.ScalarQuadraticTerm(Q[i,j], x[i], x[j]), +# ) +# end +# end +# +# objective_function = MOI.ScalarQuadraticFunction( +# quad_terms, +# MOI.ScalarAffineTerm.(q, x), +# 0.0, +# ) +# MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), objective_function) +# MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) +# +# # add constraint +# MOI.add_constraint( +# model, +# MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(G[1, :], x), 0.0), +# MOI.LessThan(h[1]), +# ) +# +# @test_throws ErrorException MOI.optimize!(model) # should break +# end + +function test_differentiating_qp_with_inequality_and_equality_constraints() + # refered from: https://www.mathworks.com/help/optim/ug/quadprog.html#d120e113424 + # Find equivalent qpth program here - https://github.com/AKS1996/jump-gsoc-2020/blob/master/DiffOpt_tests_4_py.ipynb + Q = [ + 1.0 -1.0 1.0 + -1.0 2.0 -2.0 + 1.0 -2.0 4.0 + ] + q = [2.0, -3.0, 1.0] + G = [ + 0.0 0.0 1.0 + 0.0 1.0 0.0 + 1.0 0.0 0.0 + 0.0 0.0 -1.0 + 0.0 -1.0 0.0 + -1.0 0.0 0.0 + ] + h = [1.0, 1.0, 1.0, 0.0, 0.0, 0.0] + A = [1.0 1.0 1.0;] + b = [0.5] + qp_test_with_solutions( + Ipopt.Optimizer; + Q = Q, + q = q, + G = G, + h = h, + A = A, + b = b, + dzb = ones(3), + dQf = ones(3, 3), + dqf = ones(3), + dGf = ones(6, 3), + dhf = ones(6), + dAf = ones(1, 3), + dbf = ones(1), + # Expected solutions + z = [0.0, 0.5, 0.0], + dQb = zeros(3, 3), + dqb = zeros(3), + dGb = zeros(6, 3), + dhb = zeros(6), + dAb = [0.0 -0.5 0.0], + dbb = [1.0], + ) + return +end + +# refered from https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contquadratic.jl#L3 +# Find equivalent CVXPYLayers and QPTH code here: +# https://github.com/AKS1996/jump-gsoc-2020/blob/master/DiffOpt_tests_1_py.ipynb +function test_differentiating_moi_examples_1() + # homogeneous quadratic objective + # Min x^2 + xy + y^2 + yz + z^2 + # st x + 2y + 3z >= 4 (c1) + # x + y >= 1 (c2) + # x, y, z \in R + Q = [ + 2.0 1.0 0.0 + 1.0 2.0 1.0 + 0.0 1.0 2.0 + ] + q = zeros(3) + G = [ + -1.0 -2.0 -3.0 + -1.0 -1.0 0.0 + ] + h = [-4.0, -1.0] + dQ = [ + -0.12244895 0.01530609 -0.11224488 + 0.01530609 0.09183674 0.07653058 + -0.11224488 0.07653058 -0.06122449 + ] + dq = [-0.2142857, 0.21428567, -0.07142857] + dG = [ + 0.05102692 0.30612244 0.25510856 + 0.06120519 0.36734693 0.30610315 + ] + dh = [-0.35714284; -0.4285714] + qp_test_with_solutions( + Ipopt.Optimizer; + Q = Q, + q = q, + G = G, + h = h, + dzb = ones(3), + dQf = dQ, + dqf = dq, + dGf = dG, + dhf = dh, + # Expected solutions + dQb = dQ, + dqb = dq, + dGb = dG, + dhb = dh, + ) + return +end + +# refered from https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contquadratic.jl#L3 +# Find equivalent CVXPYLayers and QPTH code here: +# https://github.com/AKS1996/jump-gsoc-2020/blob/master/DiffOpt_tests_2_py.ipynb +function test_differentiating_moi_examples_2() + # non-homogeneous quadratic objective + # minimize 2 x^2 + y^2 + xy + x + y + # s.t. 
x, y >= 0 + # x + y = 1 + Q = [ + 4 1.0 + 1 2 + ] + q = [1, 1.0] + G = [ + -1 0.0 + 0 -1 + ] + h = [0, 0.0] + A = [1 1.0] + b = [1.0] + dQ = [ + -0.05 -0.05 + -0.05 0.15 + ] + dq = [-0.2, 0.2] + dG = zeros(2, 2) + dh = zeros(2) + dA = [0.375 -1.075] + db = [0.7] + qp_test_with_solutions( + Ipopt.Optimizer; + Q = Q, + q = q, + G = G, + h = h, + A = A, + b = b, + dzb = [1.3, 0.5], + dQf = dQ, + dqf = dq, + dGf = dG, + dhf = dh, + dAf = dA, + dbf = db, + # Expected solutions + dQb = dQ, + dqb = dq, + dGb = dG, + dhb = dh, + dAb = dA, + dbb = db, + z = [0.25, 0.75], + dzf = [1.4875, -0.075], + ∇zf = [-1.28125, 3.25625], + ∇zb = [-0.2, 0.2], + λ = zeros(2), + dλf = zeros(2), + dλb = zeros(2), + ∇λb = [0.8, -0.8 / 3], + ν = [-2.75], + dνb = zeros(1), + ∇νb = [-0.7], + ) + return +end + +function test_differentiating_non_trivial_convex_qp_moi() + nz = 10 + nineq_le = 25 + neq = 10 + # read matrices from files + names = ["P", "q", "G", "h", "A", "b"] + matrices = [] + for name in names + filename = joinpath(@__DIR__, "data", "$name.txt") + push!(matrices, DelimitedFiles.readdlm(filename, ' ', Float64, '\n')) + end + Q, q, G, h, A, b = matrices + q = vec(q) + h = vec(h) + b = vec(b) + # read gradients from files + names = ["dP", "dq", "dG", "dh", "dA", "db"] + grads_actual = [] + for name in names + filename = joinpath(@__DIR__, "data", "$name.txt") + push!( + grads_actual, + DelimitedFiles.readdlm(filename, ' ', Float64, '\n'), + ) + end + dqb = vec(grads_actual[2]) + dhb = vec(grads_actual[4]) + dbb = vec(grads_actual[6]) + qp_test( + Ipopt.Optimizer, + true, + true, + true; + Q = Q, + q = q, + G = G, + h = h, + A = A, + b = b, + dzb = ones(nz), + dQf = ones(nz, nz), + dqf = ones(nz), + dGf = ones(length(h), nz), + dhf = ones(length(h)), + dAf = ones(length(b), nz), + dbf = ones(length(b)), + # Expected solutions + dqb = dqb, + dhb = dhb, + dbb = dbb, + atol = 1e-3, # The values in `data` seems to have low accuracy + rtol = 1e-3, # The values in `data` seems to have low accuracy + ) + return +end + +function test_differentiating_LP_checking_gradients_for_non_active_contraints() + # Issue #40 from Gurobi.jl + # min x + # s.t. x >= 0 + # x >= 3 + nz = 1 + qp_test_with_solutions( + HiGHS.Optimizer; + q = ones(nz), + G = -ones(2, nz), + h = [0.0, -3.0], + dzb = ones(nz), + dqf = ones(nz), + # Expected solutions + dGb = [0.0, 3.0], + dhb = [0.0, -1.0], + ) + return +end + +function test_differentiating_a_simple_LP_with_GreaterThan_constraint() + # this is canonically same as above test + # min x + # s.t. x >= 3 + nz = 1 + qp_test_with_solutions( + Ipopt.Optimizer; + q = ones(nz), + G = -ones(1, nz), + h = [-3.0], + dzb = ones(nz), + dqf = ones(nz), + # Expected solutions + dGb = [3.0], + dhb = [-1.0], + ) + return +end + +function test_differentiating_lp() + # refered from - https://en.wikipedia.org/wiki/Simplex_algorithm#Example + # max 2x + 3y + 4z + # s.t. 3x+2y+z <= 10 + # 2x+5y+3z <= 15 + # x,y,z >= 0 + nz = 3 + qp_test_with_solutions( + SCS.Optimizer; + q = [-2.0, -3.0, -4.0], + G = [ + 3.0 2.0 1.0 + 2.0 5.0 3.0 + -1.0 0.0 0.0 + 0.0 -1.0 0.0 + 0.0 0.0 -1.0 + ], + h = [10.0, 15.0, 0.0, 0.0, 0.0], + dzb = ones(nz), + dqf = ones(nz), + # Expected solutions + dqb = zeros(nz), + dGb = [ + 0.0 0.0 0.0 + 0.0 0.0 -5/3 + 0.0 0.0 5/3 + 0.0 0.0 -10/3 + 0.0 0.0 0.0 + ], + dhb = [0.0, 1 / 3, -1 / 3, 2 / 3, 0.0], + ) + return +end + +function test_differentiating_LP_with_variable_bounds() + # max 2x + 3y + 4z + # s.t. 
3x+2y+z <= 10 + # 2x+5y+3z <= 15 + # x ≤ 3 + # 0 ≤ y ≤ 2 + # z ≥ 2 + # x,y,z >= 0 + # variant of previous test with same solution + nz = 3 + qp_test_with_solutions( + HiGHS.Optimizer; + q = [-2.0, -3.0, -4.0], + G = [ + 3.0 2.0 1.0 + 2.0 5.0 3.0 + -1.0 0.0 0.0 + 0.0 -1.0 0.0 + 0.0 0.0 -1.0 + 1.0 0.0 0.0 + 0.0 1.0 0.0 + 0.0 0.0 1.0 + ], + h = [10.0, 15.0, 0.0, 0.0, 0.0, 3.0, 2.0, 6.0], + dzb = ones(nz), + dqf = ones(nz), + # Expected solutions + dqb = zeros(nz), + dGb = [ + 0.0 0.0 0.0 + 0.0 0.0 -5/3 + 0.0 0.0 5/3 + 0.0 0.0 -10/3 + 0.0 0.0 0.0 + 0.0 0.0 0.0 + 0.0 0.0 0.0 + 0.0 0.0 0.0 + ], + dhb = [0.0, 1 / 3, -1 / 3, 2 / 3, 0.0, 0.0, 0.0, 0.0], + ) + return +end + +function test_differentiating_LP_with_variable_bounds_2() + nz = 3 + qp_test_with_solutions( + HiGHS.Optimizer; + q = [-2.0, -3.0, -4.0], + G = [ + 3.0 2.0 1.0 + 2.0 5.0 3.0 + 0.0 -1.0 0.0 + 0.0 0.0 -1.0 + ], + h = [10.0, 15.0, 0.0, 0.0], + fix_indices = [1], + fix_values = [0.0], + dzb = ones(nz), + dqf = ones(nz), + # Expected solutions + dqb = zeros(nz), + dGb = [ + 0.0 0.0 0.0 + 0.0 0.0 -5/3 + 0.0 0.0 -10/3 + 0.0 0.0 0.0 + ], + dhb = [0.0, 1 / 3, 2 / 3, 0.0], + dAb = [0.0 0.0 -5 / 3], + dbb = [1 / 3], + ) + return +end + +""" +max 2x + 3y + 4z +s.t. 3x+2y+z <= 10 + 2x+5y+3z <= 15 + x ≤ 3 + 0 ≤ y ≤ 2 + 6 ≥ z ≥ -1 + x, y, z >= 0 +variant of previous test with same solution +""" +function test_differentiating_lp_with_saf_with_le_ge_constraints() + nz = 3 + qp_test_with_solutions( + HiGHS.Optimizer; + q = [-2.0, -3.0, -4.0], + G = [ + 3.0 2.0 1.0 + 2.0 5.0 3.0 + -1.0 0.0 0.0 + 0.0 -1.0 0.0 + 0.0 0.0 -1.0 + ], + h = [10.0, 15.0, 0.0, 0.0, 0.0], #5 + ub_indices = [1, 2, 3], + ub_values = [3.0, 2.0, 6.0], + lb_indices = [1], + lb_values = [-1.0], + dzb = ones(nz), + dqb = zeros(nz), + dGb = [ + 0.0 0.0 0.0 + 0.0 0.0 -5/3 + 0.0 0.0 5/3 + 0.0 0.0 -10/3 + 0.0 0.0 0.0 + 0.0 0.0 0.0 + 0.0 0.0 0.0 + 0.0 0.0 0.0 + 0.0 0.0 0.0 + ], + dhb = [0.0, 1 / 3, -1 / 3, 2 / 3, 0.0, 0.0, 0.0, 0.0, 0.0], + ) + return +end + +function test_differentiating_lp_with_nonactive_constraints() + # Issue #40 from Gurobi.jl + # min x + # s.t. x >= 0 + # x >= 3 + qp_test_with_solutions( + HiGHS.Optimizer; + q = [1.0], + G = -ones(2, 1), + h = [0.0, -3.0], + dzb = -ones(1), + dhf = [0.0, 1.0], + # Expected solutions + z = [3.0], + λ = [0.0, 1.0], + dzf = -ones(1), + dλb = zeros(2), + dhb = [0.0, 1.0], + ∇zb = zeros(1), + ∇λb = [0.0, -1.0], + dλf = zeros(2), + ) + return +end + +end # module + +TestQuadraticProgram.runtests() diff --git a/test/runtests.jl b/test/runtests.jl index c4b9c4ec..70844576 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -4,77 +4,10 @@ # in the LICENSE.md file or at https://opensource.org/licenses/MIT. 
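# The runner below includes every `*.jl` file in this directory; each test
# file is expected to be a self-contained module that defines `test_*`
# functions and a `runtests()` that discovers them by reflection. A minimal
# file following this convention (the module name `TestExample` and its test
# are illustrative, not part of the suite) is:
#
#     module TestExample
#
#     using Test
#
#     function runtests()
#         for name in names(@__MODULE__; all = true)
#             if startswith("$name", "test_")
#                 @testset "$name" begin
#                     getfield(@__MODULE__, name)()
#                 end
#             end
#         end
#         return
#     end
#
#     test_one_plus_one() = @test 1 + 1 == 2
#
#     end # module
#
#     TestExample.runtests()
#
# Helper-only files (`utils.jl`) are excluded from the loop and are instead
# `include`d by the modules that need them.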
using Test -import DiffOpt -import Random -import LinearAlgebra -import DelimitedFiles -import MathOptInterface as MOI -const MOIT = MOI.Test -const ATOL = 2e-4 -const RTOL = 2e-4 - -include("bridges.jl") - -@testset "MOI_wrapper" begin - @testset "Utils" begin - include("utils.jl") - end - @testset "MOI_wrapper main" begin - include("moi_wrapper.jl") - end - @testset "QP fwd" begin - include("qp_forward.jl") - end - @testset "Conic reverse" begin - include("conic_reverse.jl") - end -end - -@testset "JuMP wrapper" begin - include("jump.jl") -end - -@testset "Solver Interface" begin - include("solver_interface.jl") -end - -@testset "Singular error with deleted variables: Sensitivity index issue" begin - include("singular_exception.jl") -end - -@testset "Examples" begin - @testset "autotuning-ridge" begin - include(joinpath(@__DIR__, "../docs/src/examples/autotuning-ridge.jl")) - end - @testset "chainrules_unit" begin - include(joinpath(@__DIR__, "../docs/src/examples/chainrules_unit.jl")) - end - @testset "custom-relu" begin - include(joinpath(@__DIR__, "../docs/src/examples/custom-relu.jl")) # needs downloads - end - @testset "matrix-inversion-manual" begin - include( - joinpath( - @__DIR__, - "../docs/src/examples/matrix-inversion-manual.jl", - ), - ) - end - @testset "sensitivity-analysis-ridge" begin - include( - joinpath( - @__DIR__, - "../docs/src/examples/sensitivity-analysis-ridge.jl", - ), - ) - end - @testset "sensitivity-analysis-svm" begin - include( - joinpath( - @__DIR__, - "../docs/src/examples/sensitivity-analysis-svm.jl", - ), - ) +@testset "$file" for file in readdir(@__DIR__) + if !endswith(file, ".jl") || file in ("runtests.jl", "utils.jl") + continue end + include(file) end diff --git a/test/sensitivity_index_issue.jl b/test/sensitivity_index_issue.jl deleted file mode 100644 index 6eedf09d..00000000 --- a/test/sensitivity_index_issue.jl +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (c) 2020: Akshay Sharma and contributors -# -# Use of this source code is governed by an MIT-style license that can be found -# in the LICENSE.md file or at https://opensource.org/licenses/MIT. 
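# The KKT-based sensitivity checks deleted below were moved into the
# `TestJuMP` module (the `dprimal_dcons` tests near the top of this patch).
# The pattern they exercise, shown here as a minimal sketch (assuming `model`
# is a solved differentiable JuMP model, `ci` one of its constraint
# references, and `x` its variables):
#
#     # seed a unit perturbation of this constraint's constant term
#     MOI.set(
#         model,
#         DiffOpt.ForwardConstraintFunction(),
#         ci,
#         convert(MOI.ScalarAffineFunction{Float64}, 1.0),
#     )
#     DiffOpt.forward_differentiate!(model)
#     # sensitivity of each primal variable to that perturbation
#     dz = MOI.get.(model, DiffOpt.ForwardVariablePrimal(), x)
#     # reset the seed before perturbing the next constraint
#     MOI.set(
#         model,
#         DiffOpt.ForwardConstraintFunction(),
#         ci,
#         convert(MOI.ScalarAffineFunction{Float64}, 0.0),
#     )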
- -using JuMP -import HiGHS - -function DiffOptModel() - model = DiffOpt.diff_optimizer(HiGHS.Optimizer) - MOI.set(model, DiffOpt.ProgramClass(), DiffOpt.QUADRATIC) - return model -end - -#TEST OF DIFFOPT -@testset "Sensitivity index issue" begin - testModel = Model(() -> DiffOptModel()) - #Variables - @variable(testModel, v[i in 1:1]) - @variable(testModel, u[i in 1:1]) - @variable(testModel, g[j in 1:3]) - @variable(testModel, a) - #Contraints - @constraint(testModel, v_cons_min, v[1] >= 0.0) - @constraint(testModel, u_cons_min, u[1] >= 0.0) - @constraint(testModel, G1_cons_min, g[1] >= 0.0) - @constraint(testModel, G2_cons_min, g[2] >= 0.0) - @constraint(testModel, G3_cons_min, g[3] >= 0.0) - @constraint(testModel, a_cons_min, a >= 0.0) - - @constraint(testModel, v_cons_max, v[1] <= 50.0) - @constraint(testModel, G1_cons_max, g[1] <= 15.0) - @constraint(testModel, G2_cons_max, g[2] <= 20.0) - @constraint(testModel, G3_cons_max, g[3] <= 25.0) - - @constraint(testModel, future_constraint, v[1] + a >= 50) - - @constraint(testModel, hidro_conservation[i in 1:1], v[1] + u[i] == 45.0) - @constraint(testModel, demand_constraint, g[1] + g[2] + g[3] + u[1] == 100) - - #Stage objective - @objective(testModel, Min, 3.0 * g[1] + 5.0 * g[2] + 7.0 * g[3] + a) - - #Calculation of sensitivities by Manual KKT - - #v,u,g1,g2,g3,a - A = [ - 1.0 1.0 0.0 0.0 0.0 0.0 - 0.0 1.0 1.0 1.0 1.0 0.0 - ] - #rhs - b = [ - 45.0 - 100.0 - ] - #v,u,g1,g2,g3,a - G = [ - -1.0 0.0 0.0 0.0 0.0 0.0 - 0.0 -1.0 0.0 0.0 0.0 0.0 - 0.0 0.0 -1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 -1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 -1.0 0.0 - 0.0 0.0 0.0 0.0 0.0 -1.0 - 1.0 0.0 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 1.0 0.0 - -1.0 0.0 0.0 0.0 0.0 -1.0 - ] - h = [#rhs - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 50.0 - 15.0 - 20.0 - 25.0 - -50.0 - ] - optimize!(testModel) - lambda = [ - JuMP.dual.(testModel[:v_cons_min]) - JuMP.dual.(testModel[:u_cons_min]) - JuMP.dual.(testModel[:G1_cons_min]) - JuMP.dual.(testModel[:G2_cons_min]) - JuMP.dual.(testModel[:G3_cons_min]) - JuMP.dual.(testModel[:a_cons_min]) - JuMP.dual.(testModel[:v_cons_max]) - JuMP.dual.(testModel[:G1_cons_max]) - JuMP.dual.(testModel[:G2_cons_max]) - JuMP.dual.(testModel[:G3_cons_max]) - JuMP.dual.(testModel[:future_constraint]) - ] - lambda = abs.(lambda) - z = [ - JuMP.value.(testModel[:v])[1] - JuMP.value.(testModel[:u])[1] - JuMP.value.(testModel[:g])[1] - JuMP.value.(testModel[:g])[2] - JuMP.value.(testModel[:g])[3] - JuMP.value.(testModel[:a]) - ] - - Q = zeros(6, 6) - D_lambda = diagm(lambda) - D_Gz_h = diagm(G * z - h) - - KKT = [ - Q (G') (A') - diagm(lambda)*G diagm(G * z - h) zeros(11, 2) - A zeros(2, 11) zeros(2, 2) - ] - rhsKKT = [ - zeros(6, 11) zeros(6, 2) - diagm(lambda) zeros(11, 2) - zeros(2, 11) diagm(ones(2)) - ] - - derivativeKKT = - hcat([DiffOpt.lsqr(KKT, rhsKKT[:, i]) for i in 1:size(rhsKKT)[2]]...) 
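# The system assembled above is the differential of the problem's KKT
# conditions: with primal point z, inequality multipliers λ, and equality
# multipliers ν (and Q = 0, since the stage problem is linear),
#
#     [ Q          G'            A' ] [dz]
#     [ diag(λ)G   diag(Gz - h)  0  ] [dλ] = rhs
#     [ A          0             0  ] [dν]
#
# Each column of `rhsKKT` seeds a unit perturbation of one constraint's
# right-hand side: the inequality columns carry `diagm(lambda)` because the
# complementarity row scales dh by λ, and the equality columns carry the
# identity. `DiffOpt.lsqr` is applied column by column, presumably because
# the KKT matrix can be singular at a degenerate vertex, where a
# least-squares solve still returns usable sensitivities.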
- - dprimal_dconsKKT = derivativeKKT[1:6, :] - #Finished calculation of sensitivities by Manual KKT - - #Calculation of sensitivities by DiffOpt - xRef = [ - testModel[:v_cons_min] - testModel[:u_cons_min] - testModel[:G1_cons_min] - testModel[:G2_cons_min] - testModel[:G3_cons_min] - testModel[:a_cons_min] - testModel[:v_cons_max] - testModel[:G1_cons_max] - testModel[:G2_cons_max] - testModel[:G3_cons_max] - testModel[:future_constraint] - testModel[:hidro_conservation] - testModel[:demand_constraint] - ] - yRef = [ - testModel[:v] - testModel[:u] - testModel[:g] - testModel[:a] - ] - dprimal_dcons = Array{Float64,2}(undef, length(yRef), length(xRef)) - for i in 1:length(xRef) - constraint_equation = convert(MOI.ScalarAffineFunction{Float64}, 1.0) - MOI.set( - testModel, - DiffOpt.ForwardConstraintFunction(), - xRef[i], - constraint_equation, - ) - DiffOpt.forward_differentiate!(testModel) - dprimal_dcons[:, i] .= - MOI.get.(testModel, DiffOpt.ForwardVariablePrimal(), yRef) - constraint_equation = convert(MOI.ScalarAffineFunction{Float64}, 0.0) - MOI.set( - testModel, - DiffOpt.ForwardConstraintFunction(), - xRef[i], - constraint_equation, - ) - end - - @testset "Sensitivities Result" begin - #The result given by Manual KKT needs to invert sign in some values to match the constraints. - #The result given by DiffOpt needs to invert sign to be in the right side of the equation. - @test -dprimal_dcons[:, 1] ≈ -dprimal_dconsKKT[:, 1] atol = ATOL - @test -dprimal_dcons[:, 2] ≈ -dprimal_dconsKKT[:, 2] atol = ATOL - @test -dprimal_dcons[:, 3] ≈ -dprimal_dconsKKT[:, 3] atol = ATOL - @test -dprimal_dcons[:, 4] ≈ -dprimal_dconsKKT[:, 4] atol = ATOL - @test -dprimal_dcons[:, 5] ≈ -dprimal_dconsKKT[:, 5] atol = ATOL - @test -dprimal_dcons[:, 6] ≈ -dprimal_dconsKKT[:, 6] atol = ATOL - @test -dprimal_dcons[:, 7] ≈ dprimal_dconsKKT[:, 7] atol = ATOL - @test -dprimal_dcons[:, 8] ≈ dprimal_dconsKKT[:, 8] atol = ATOL - @test -dprimal_dcons[:, 9] ≈ dprimal_dconsKKT[:, 9] atol = ATOL - @test -dprimal_dcons[:, 10] ≈ dprimal_dconsKKT[:, 10] atol = ATOL - @test -dprimal_dcons[:, 11] ≈ -dprimal_dconsKKT[:, 11] atol = ATOL - @test -dprimal_dcons[:, 12] ≈ dprimal_dconsKKT[:, 12] atol = ATOL - @test -dprimal_dcons[:, 13] ≈ dprimal_dconsKKT[:, 13] atol = ATOL - end - @testset "Primal Results" begin - @test JuMP.value.(testModel[:g]) == [15.0, 20.0, 20.0] - @test JuMP.value.(testModel[:v]) == [0.0] - @test JuMP.value.(testModel[:u]) == [45.0] - @test JuMP.value(testModel[:a]) == 50.0 - end -end diff --git a/test/singular_exception.jl b/test/singular_exception.jl deleted file mode 100644 index c1188574..00000000 --- a/test/singular_exception.jl +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2020: Akshay Sharma and contributors -# -# Use of this source code is governed by an MIT-style license that can be found -# in the LICENSE.md file or at https://opensource.org/licenses/MIT. - -using Test -import DiffOpt -import MathOptInterface as MOI -import Ipopt - -Q = [ - 4.0 1.0 - 1.0 2.0 -] -q = [1.0, 1.0] -G = [1.0 1.0] -h = [-1.0] - -model = DiffOpt.diff_optimizer(Ipopt.Optimizer) -MOI.set(model, MOI.Silent(), true) -x = MOI.add_variables(model, 2) - -# define objective -quad_terms = MOI.ScalarQuadraticTerm{Float64}[] -for i in 1:2 - for j in i:2 # indexes (i,j), (j,i) will be mirrored. 
specify only one kind - push!(quad_terms, MOI.ScalarQuadraticTerm(Q[i, j], x[i], x[j])) - end -end -objective_function = - MOI.ScalarQuadraticFunction(quad_terms, MOI.ScalarAffineTerm.(q, x), 0.0) -MOI.set( - model, - MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), - objective_function, -) -MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - -# add constraint -c = MOI.add_constraint( - model, - MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(G[1, :], x), 0.0), - MOI.LessThan(h[1]), -) -MOI.optimize!(model) - -@test MOI.get(model, MOI.VariablePrimal(), x) ≈ [-0.25; -0.75] atol = ATOL rtol = - RTOL - -@test model.diff === nothing -MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) -DiffOpt.reverse_differentiate!(model) - -grad_wrt_h = - MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) -@test grad_wrt_h ≈ -1.0 atol = 2ATOL rtol = RTOL -@test model.diff !== nothing - -# adding two variables invalidates the cache -y = MOI.add_variables(model, 2) -for yi in y - MOI.delete(model, yi) -end -@test model.diff === nothing -MOI.optimize!(model) - -MOI.set.(model, DiffOpt.ReverseVariablePrimal(), x, ones(2)) -DiffOpt.reverse_differentiate!(model) - -grad_wrt_h = - MOI.constant(MOI.get(model, DiffOpt.ReverseConstraintFunction(), c)) - -@test grad_wrt_h ≈ -1.0 atol = 1e-3 -@test model.diff !== nothing diff --git a/test/solver_interface.jl b/test/solver_interface.jl deleted file mode 100644 index e0601dca..00000000 --- a/test/solver_interface.jl +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2020: Akshay Sharma and contributors -# -# Use of this source code is governed by an MIT-style license that can be found -# in the LICENSE.md file or at https://opensource.org/licenses/MIT. - -import HiGHS - -@testset "FEASIBILITY_SENSE zeros objective" begin - model = DiffOpt.diff_optimizer(HiGHS.Optimizer) - MOI.set(model, MOI.Silent(), true) - x = MOI.add_variable(model) - MOI.add_constraint(model, x, MOI.GreaterThan(1.0)) - MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x) - - MOI.optimize!(model) - @test MOI.get(model, MOI.ObjectiveValue()) ≈ 1.0 atol = ATOL rtol = RTOL - - MOI.set(model, MOI.ObjectiveSense(), MOI.FEASIBILITY_SENSE) - MOI.optimize!(model) - @test MOI.get(model, MOI.ObjectiveValue()) == 0.0 -end - -@testset "Forward or reverse without optimizing throws" begin - model = DiffOpt.diff_optimizer(HiGHS.Optimizer) - MOI.set(model, MOI.Silent(), true) - x = MOI.add_variable(model) - MOI.add_constraint(model, x, MOI.GreaterThan(1.0)) - MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) - MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x) - # do not optimize, just try to differentiate - @test_throws ErrorException DiffOpt.forward_differentiate!(model) - @test_throws ErrorException DiffOpt.reverse_differentiate!(model) - # impossible constraint - MOI.add_constraint(model, x, MOI.LessThan(0.5)) - MOI.optimize!(model) - @test_throws ErrorException DiffOpt.forward_differentiate!(model) - @test_throws ErrorException DiffOpt.reverse_differentiate!(model) -end - -struct TestSolver end - -# always use IterativeSolvers -function DiffOpt.QuadraticProgram.solve_system( - ::TestSolver, - LHS, - RHS, - iterative::Bool, -) - return IterativeSolvers.lsqr(LHS, RHS) -end - -@testset "Setting the linear solver in the quadratic solver" begin - model = DiffOpt.QuadraticProgram.Model() - @test MOI.supports(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver()) - @test MOI.get(model, 
DiffOpt.QuadraticProgram.LinearAlgebraSolver()) === - nothing - MOI.set(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver(), TestSolver()) - @test MOI.get(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver()) == - TestSolver() - MOI.empty!(model) - @test MOI.get(model, DiffOpt.QuadraticProgram.LinearAlgebraSolver()) == - TestSolver() -end diff --git a/test/utils.jl b/test/utils.jl index ff350079..8a4fc77c 100644 --- a/test/utils.jl +++ b/test/utils.jl @@ -8,7 +8,8 @@ using JuMP import DiffOpt import MathOptInterface as MOI import LinearAlgebra: ⋅ -import SparseArrays: sparse +import LinearAlgebra +import SparseArrays macro _test(computed, expected::Symbol) exp = esc(expected) @@ -29,7 +30,9 @@ end Check our results for QPs using the notations of [AK17]. -[AK17] Amos, Brandon, and J. Zico Kolter. "Optnet: Differentiable optimization as a layer in neural networks." International Conference on Machine Learning. PMLR, 2017. https://arxiv.org/pdf/1703.00443.pdf +[AK17] Amos, Brandon, and J. Zico Kolter. "Optnet: Differentiable optimization +as a layer in neural networks." International Conference on Machine Learning. +PMLR, 2017. https://arxiv.org/pdf/1703.00443.pdf """ function qp_test( solver, @@ -135,7 +138,7 @@ function qp_test( MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE) if iszero(Q) - obj = q ⋅ v + obj = LinearAlgebra.dot(q, v) else obj = LinearAlgebra.dot(v, Q / 2, v) + LinearAlgebra.dot(q, v) end @@ -318,6 +321,7 @@ function qp_test( pprod = dQf ⋅ dQb + dqf ⋅ dqb + dGf ⋅ dGb + dhf ⋅ dhb + dAf ⋅ dAb + dbf ⋅ dbb @test pprod ≈ pprod atol = ATOL rtol = RTOL + return end function qp_test(solver, lt, set_zero, canonicalize; kws...) @@ -327,6 +331,7 @@ function qp_test(solver, lt, set_zero, canonicalize; kws...) ] qp_test(solver, diff_model, lt, set_zero, canonicalize; kws...) end + return end function qp_test(solver; kws...) @@ -347,6 +352,7 @@ function qp_test(solver; kws...) end end end + return end function qp_test_with_solutions( @@ -427,4 +433,5 @@ function qp_test_with_solutions( kws..., ) end + return end
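# For reference, `qp_test` follows the [AK17] formulation cited in its
# docstring, differentiating the solution z* of
#
#     minimize    (1/2) zᵀ Q z + qᵀ z
#     subject to  G z ≤ h        (multipliers λ)
#                 A z = b        (multipliers ν)
#
# As the call sites above suggest, the keyword convention appears to be:
# plain names (`z`, `λ`, `ν`) are expected primal/dual solutions; an `f`
# suffix marks forward-mode seeds (`dQf`, `dqf`, `dGf`, `dhf`, `dAf`, `dbf`)
# with `dzf`/`dλf` the resulting tangents; a `b` suffix marks reverse mode,
# where `dzb` is the input seed and `dQb`, `dqb`, `dGb`, `dhb`, `dAb`, `dbb`
# are the returned problem-data gradients; and the `∇`-prefixed keywords
# (`∇zf`, `∇zb`, `∇λb`, `∇νb`) are the corresponding expected gradient
# vectors, checked only when supplied.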