Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion src/FullyConnected.jl
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,8 @@ function MLJFlux.train!(
return training_loss / n_batches
end

function train!(model, loss, opt_state, X, Y; batchsize=32, shuffle=true)
function train!(model, loss, opt_state, X, Y; _batchsize=32, shuffle=true)
batchsize = min(size(X, 2), _batchsize)
X = X |> gpu
Y = Y |> gpu
data = Flux.DataLoader((X, Y),
Expand Down
4 changes: 3 additions & 1 deletion src/L2O.jl
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,8 @@ export ArrowFile,
make_convex,
ConvexRule,
relative_rmse,
relative_mae
relative_mae,
inconvexhull

include("datasetgen.jl")
include("csvrecorder.jl")
Expand All @@ -46,5 +47,6 @@ include("worst_case_iter.jl")
include("FullyConnected.jl")
include("nn_expression.jl")
include("metrics.jl")
include("inconvexhull.jl")

end
33 changes: 33 additions & 0 deletions src/inconvexhull.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
"""

    inconvexhull(training_set::Matrix{Float64}, test_set::Matrix{Float64}, solver)

Check whether each row of `test_set` lies inside the convex hull of the rows of
`training_set`. Solves a single linear program (built with JuMP and optimized by
the supplied `solver`, e.g. `HiGHS.Optimizer`) that searches for convex-combination
weights of the training points reproducing each test point.

Returns a `Vector{Bool}` of length `size(test_set, 1)`: entry `i` is `true` when
test point `i` is (up to a small tolerance) a convex combination of the training
points.
"""
function inconvexhull(training_set::Matrix{Float64}, test_set::Matrix{Float64}, solver)
    # Number of training points, test points, and the shared dimension.
    n, d = size(training_set)
    m, d_test = size(test_set)
    d == d_test ||
        throw(DimensionMismatch("training_set and test_set must have the same number of columns"))

    # Create the model
    model = JuMP.Model(solver)

    # lambda[:, i] are the convex-combination weights for test point i.
    @variable(model, lambda[1:n, 1:m] >= 0)
    @constraint(model, convex_combination[i=1:m], sum(lambda[j, i] for j in 1:n) == 1)

    # Two-sided slack per point AND per coordinate. A single nonnegative slack
    # shared across coordinates can only shift the combination upward, which
    # makes the LP infeasible (and `value.` error) for points outside the hull
    # in a negative direction.
    @variable(model, slack_up[1:m, 1:d] >= 0)
    @variable(model, slack_down[1:m, 1:d] >= 0)

    # The convex combination must match each test coordinate up to the slacks.
    @constraint(model, in_convex_hull[i=1:m, k=1:d],
        sum(lambda[j, i] * training_set[j, k] for j in 1:n) ==
        test_set[i, k] + slack_up[i, k] - slack_down[i, k])

    # Minimize the total deviation: a point is inside the hull iff its
    # deviation can be driven to zero.
    @objective(model, Min, sum(slack_up) + sum(slack_down))

    # solve the model
    optimize!(model)

    # Explicit atol: `isapprox(x, 0)` with defaults is an exact-zero test,
    # which is fragile against solver round-off.
    deviation = value.(slack_up) .+ value.(slack_down)
    return [all(isapprox.(deviation[i, :], 0.0; atol=1e-6)) for i in 1:m]
end
17 changes: 17 additions & 0 deletions test/inconvexhull.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
"""
    test_inconvexhull()

Exercise `inconvexhull(training_set, test_set, solver)` on the unit square:
interior/boundary points must be reported inside, exterior points outside.
"""
function test_inconvexhull()
    @testset "inconvexhull" begin
        # The four vertices of the unit square define the hull.
        hull_vertices = [0. 0; 1 0; 0 1; 1 1]

        # One interior point, two exterior points, one boundary point.
        query_points = [0.5 0.5; -0.5 0.5; 0.5 -0.5; 0.0 0.5]
        expected = [true, false, false, true]

        @test inconvexhull(hull_vertices, query_points, HiGHS.Optimizer) == expected
    end
end
61 changes: 0 additions & 61 deletions test/nn_expression.jl
Original file line number Diff line number Diff line change
Expand Up @@ -28,64 +28,3 @@ function test_flux_jump_basic()
end
end
end

"""
    test_FullyConnected_jump()

Tests running a jump model with a FullyConnected Network expression.

Trains an MLJ `MultitargetNeuralNetworkRegressor` on random data, embeds the
fitted Flux network into a JuMP model as an expression, minimizes it subject to
box/sum constraints, and checks the optimized expression value matches a direct
forward pass of the network at the solution.
"""
function test_FullyConnected_jump()
    for i in 1:10
        # Random regression data: 100 samples, 3 features, 1 target.
        X = rand(100, 3)
        Y = rand(100, 1)

        nn = MultitargetNeuralNetworkRegressor(;
            builder=FullyConnectedBuilder([8, 8, 8]),
            rng=123,
            epochs=100,
            optimiser=optimiser,
            acceleration=CUDALibs(),
            batch_size=32,
        )

        # Fix: the target matrix is `Y`; the original passed an undefined `y`.
        mach = machine(nn, X, Y)
        fit!(mach; verbosity=2)

        # The raw Flux chain is the first element of the MLJFlux fitresult.
        flux_model = mach.fitresult[1]

        model = JuMP.Model(Gurobi.Optimizer)

        @variable(model, x[i = 1:3] >= 2.3)

        # Embed the network output as a scalar JuMP expression.
        ex = flux_model(x)[1]

        # @constraint(model, ex >= -100.0)
        @constraint(model, sum(x) <= 10)

        @objective(model, Min, ex)

        JuMP.optimize!(model)

        @test termination_status(model) === OPTIMAL
        # Near zero, a relative tolerance is meaningless — switch to absolute.
        if flux_model(value.(x))[1] <= 1.0
            @test isapprox(flux_model(value.(x))[1], value(ex); atol=0.01)
        else
            @test isapprox(flux_model(value.(x))[1], value(ex); rtol=0.001)
        end
    end
end

"""
    print_conflict!(model)

Compute an infeasibility conflict (IIS) for `model` and log every constraint
that participates in it via `@info`. Returns `nothing`.
"""
function print_conflict!(model)
    # Ask the solver for a conflict certificate first.
    JuMP.compute_conflict!(model)
    for (func_type, set_type) in list_of_constraint_types(model)
        constraints = all_constraints(model, func_type, set_type)
        for idx in eachindex(constraints)
            # Skip container slots that were never assigned.
            if isassigned(constraints, idx)
                con = constraints[idx]
                status = MOI.get(model, MOI.ConstraintConflictStatus(), con)
                if status == MOI.IN_CONFLICT
                    @info JuMP.name(con) con
                end
            end
        end
    end
    return nothing
end
8 changes: 7 additions & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ using Flux
using MLJ
using CSV
using DataFrames
using Optimisers

using NonconvexNLopt

Expand All @@ -29,9 +30,14 @@ include(joinpath(test_dir, "test_flux_forecaster.jl"))

include(joinpath(test_dir, "nn_expression.jl"))

include(joinpath(test_dir, "inconvexhull.jl"))

@testset "L2O.jl" begin
test_fully_connected()
test_flux_jump_basic()
test_inconvexhull()

mktempdir() do path
test_flux_jump_basic()
test_problem_iterator(path)
test_worst_case_problem_iterator(path)
file_in, file_out = test_pglib_datasetgen(path, "pglib_opf_case5_pjm", 20)
Expand Down
17 changes: 17 additions & 0 deletions test/test_flux_forecaster.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,3 +38,20 @@ function test_flux_forecaster(file_in::AbstractString, file_out::AbstractString)
rm(file_out)
end
end

# Test the Flux.jl forecaster outside MLJ.jl
"""
    test_fully_connected(; num_samples::Int=100, num_features::Int=10)

Train a `FullyConnected` network directly with Optimisers.jl (bypassing MLJ)
on random data and check that prediction returns an `Array`.
"""
function test_fully_connected(;num_samples::Int=100, num_features::Int=10)
    # Observations are columns: (num_features, num_samples).
    X = rand(num_features, num_samples)
    y = rand(1, num_samples)

    # Fix: the input width must follow `num_features`; it was hard-coded to 10,
    # so any other keyword value produced a dimension mismatch.
    model = FullyConnected(num_features, [10, 10], 1)

    # Train the model with Adam via Optimisers.jl.
    optimizer = Optimisers.Adam()
    opt_state = Optimisers.setup(optimizer, model)
    L2O.train!(model, Flux.mse, opt_state, X, y)

    # Make predictions on the training inputs.
    predictions = model(X)
    @test predictions isa Array
end