From ba6f0a1803a3647e66cf8455e227edfceb27a450 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Thu, 9 Nov 2023 17:09:58 -0500 Subject: [PATCH 01/74] overloads --- Project.toml | 3 +- examples/flux/HuggingFaceDatasets.jl | 39 ++++++ examples/flux/flux_forecaster_script.jl | 18 ++- examples/powermodels/power_flow_dataset.jl | 71 +++++++++++ examples/powermodels/powermodels.jl | 141 +++++++++++++++++++++ src/nn_expression.jl | 64 ++++++++++ 6 files changed, 329 insertions(+), 7 deletions(-) create mode 100644 examples/flux/HuggingFaceDatasets.jl create mode 100644 examples/powermodels/power_flow_dataset.jl create mode 100644 examples/powermodels/powermodels.jl create mode 100644 src/nn_expression.jl diff --git a/Project.toml b/Project.toml index e7c94ec..e060a16 100644 --- a/Project.toml +++ b/Project.toml @@ -11,6 +11,7 @@ Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" JuMP = "4076af6c-e467-56ae-b986-b466b2749572" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MLJFlux = "094fc8d1-fd35-5302-93ea-dabda2abf845" +NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" Nonconvex = "01bcebdf-4d21-426d-b5c4-6132c1619978" ParametricOptInterface = "0ce4ce61-57bf-432b-a095-efac525d185e" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" @@ -32,11 +33,11 @@ DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b" Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" +MLJ = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7" NonconvexNLopt = "b43a31b8-ff9b-442d-8e31-c163daa8ab75" PGLib = "07a8691f-3d11-4330-951b-3c50f98338be" PowerModels = "c36e90e8-916a-50a6-bd94-075b64ef4655" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -MLJ = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7" [targets] test = ["Test", "DelimitedFiles", "PGLib", "HiGHS", "PowerModels", "DataFrames", "Clarabel", "Ipopt", "NonconvexNLopt", "MLJ"] diff --git a/examples/flux/HuggingFaceDatasets.jl b/examples/flux/HuggingFaceDatasets.jl new file mode 100644 index 0000000..2133064 --- /dev/null +++ b/examples/flux/HuggingFaceDatasets.jl @@ -0,0 +1,39 @@ +using PyCall +using Conda +# Conda.add("huggingface_hub") + +huggingface_hub = pyimport("huggingface_hub") + +huggingface_hub.login(token=ENV["HUGGINGFACE_TOKEN"]) + +function download_dataset(organization, dataset, case_name, io_type; formulation="", cache_dir="./data/") + dataset_url = joinpath(organization, dataset) + if io_type ∉ ["input", "output"] + throw(ArgumentError("io_type must be 'input' or 'output'.")) + end + + if io_type == "input" + data_path = joinpath(case_name, "input") + else + if formulation == "" + throw(ArgumentError("Formulation must be specified for 'output' data.")) + end + data_path = joinpath(case_name, "output", formulation) + end + + # Fetch the dataset from the provided URL + huggingface_hub.snapshot_download(dataset_url, allow_patterns=["$data_path/*.arrow"], local_dir=cache_dir, repo_type="dataset", local_dir_use_symlinks=false) + + return nothing +end + +cache_dir="./examples/powermodels/data/" +organization = "L2O" +dataset = "pglib_opf_solves" +case_name = "pglib_opf_case300_ieee" +formulation = "DCPPowerModel" +io_type = "input" +download_dataset(organization, dataset, case_name, io_type; cache_dir=cache_dir) + +io_type = "output" +download_dataset(organization, dataset, case_name, io_type; formulation=formulation , cache_dir=cache_dir) \ No newline at end of file diff --git a/examples/flux/flux_forecaster_script.jl b/examples/flux/flux_forecaster_script.jl index c69a70b..9594b74 100644 
--- a/examples/flux/flux_forecaster_script.jl +++ b/examples/flux/flux_forecaster_script.jl @@ -3,26 +3,32 @@ TestEnv.activate() using Arrow using CSV +using MLJFlux using Flux +using MLJ using DataFrames using PowerModels using L2O # Paths case_name = "pglib_opf_case300_ieee" # pglib_opf_case300_ieee # pglib_opf_case5_pjm -network_formulation = SOCWRConicPowerModel # SOCWRConicPowerModel # DCPPowerModel +network_formulation = DCPPowerModel # SOCWRConicPowerModel # DCPPowerModel filetype = ArrowFile # ArrowFile # CSVFile path_dataset = joinpath(pwd(), "examples", "powermodels", "data") -case_file_path = joinpath(path_dataset, case_name, string(network_formulation)) +case_file_path = joinpath(path_dataset, case_name) +case_file_path_output = joinpath(case_file_path, "output", string(network_formulation)) +case_file_path_input = joinpath(case_file_path, "input") # Load input and output data tables -iter_files = readdir(joinpath(case_file_path)) -iter_files = filter(x -> occursin(string(filetype), x), iter_files) +iter_files_in = readdir(joinpath(case_file_path_input)) +iter_files_in = filter(x -> occursin(string(filetype), x), iter_files_in) file_ins = [ - joinpath(case_file_path, file) for file in iter_files if occursin("input", file) + joinpath(case_file_path_input, file) for file in iter_files_in if occursin("input", file) ] +iter_files_out = readdir(joinpath(case_file_path_output)) +iter_files_out = filter(x -> occursin(string(filetype), x), iter_files_out) file_outs = [ - joinpath(case_file_path, file) for file in iter_files if occursin("output", file) + joinpath(case_file_path_output, file) for file in iter_files_out if occursin("output", file) ] batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins] diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl new file mode 100644 index 0000000..20a4db5 --- /dev/null +++ b/examples/powermodels/power_flow_dataset.jl @@ -0,0 +1,71 @@ +using TestEnv +TestEnv.activate() + +using PowerModels +using PGLib +using MLJFlux +using Gurobi +using Flux +using MLJ +using L2O + +matpower_case_name = "pglib_opf_case5_pjm" + +network_data = make_basic_network(pglib(matpower_case_name)) + +branch = network_data["branch"]["1"] +f_bus_index = branch["f_bus"] +f_bus = network_data["bus"]["$f_bus_index"] +t_bus_index = branch["t_bus"] +t_bus = network_data["bus"]["$t_bus_index"] + +f_owms = function_ohms_yt_from(branch) + +num_samples = 1000 + +vm_fr, vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"]), rand(t_bus["vmin"]:0.0001:t_bus["vmax"]) +va_fr, va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), rand(branch["angmin"]:0.0001:branch["angmax"], num_samples) +a_diff = va_fr - va_to + +using Plots +f_owms_val = f_owms.(vm_fr, vm_to, va_fr, va_to) +plt = scatter(a_diff, [i[1] for i in f_owms_val], label="p_fr", xlabel="θ_fr - θ_to", ylabel="flow", legend=:outertopright); +scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr"); + +# Define Model +model = MultitargetNeuralNetworkRegressor(; + builder=FullyConnectedBuilder([32, 64]), + rng=123, + epochs=1000, + optimiser=Flux.Optimise.Adam(), +) + +# Define the machine +_vm_fr, _vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"], num_samples), rand(t_bus["vmin"]:0.0001:t_bus["vmax"], num_samples) +_va_fr, _va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), rand(branch["angmin"]:0.0001:branch["angmax"], num_samples) +mach = machine(model, [_vm_fr _vm_to _va_fr _va_to], vcat([[i[1] i[2]] for i in f_owms_val]...)) 
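+# `machine` binds the MLJ regressor to the n×4 feature matrix (columns
+# vm_fr, vm_to, va_fr, va_to) and to the n×2 target of (p_fr, q_fr) values
+# stacked from the sampled Ohm's-law evaluations; `fit!` below trains it.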
+fit!(mach; verbosity=2, force=true) + +# Make predictions +predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) + +scatter!(plt, a_diff, predictions[:,1], label="p_fr_pred"); +scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") + +function function_ohms_yt_from(::Dict) + return (vm_fr, vm_to, va_fr, va_to) -> mach.fitresult[1]([vm_fr, vm_to, va_fr, va_to]) +end + +function function_ohms_yt_to(branch::Dict) + return (vm_fr, vm_to, va_fr, va_to) -> mach.fitresult[1]([vm_fr, vm_to, va_fr, va_to]) +end + +pm = instantiate_model( + network_data, + ACPPowerModel, + PowerModels.build_opf; + setting=Dict("output" => Dict("branch_flows" => true, "duals" => true)), +) + +# solve +result = optimize_model!(pm, optimizer=Gurobi.Optimizer) \ No newline at end of file diff --git a/examples/powermodels/powermodels.jl b/examples/powermodels/powermodels.jl new file mode 100644 index 0000000..a221dca --- /dev/null +++ b/examples/powermodels/powermodels.jl @@ -0,0 +1,141 @@ +import PowerModels: constraint_ohms_yt_from, constraint_ohms_yt_to, constraint_ohms_y_from, constraint_ohms_y_to + +function _function_ohms_yt_from(branch::Dict) + g, b = calc_branch_y(branch) + tr, ti = calc_branch_t(branch) + g_fr = branch["g_fr"] + b_fr = branch["b_fr"] + tm = branch["tap"] + + return (vm_fr, vm_to, va_fr, va_to) -> ((g+g_fr)/tm^2*vm_fr^2 + (-g*tr+b*ti)/tm^2*(vm_fr*vm_to*cos(va_fr-va_to)) + (-b*tr-g*ti)/tm^2*(vm_fr*vm_to*sin(va_fr-va_to)), # from + -(b+b_fr)/tm^2*vm_fr^2 - (-b*tr-g*ti)/tm^2*(vm_fr*vm_to*cos(va_fr-va_to)) + (-g*tr+b*ti)/tm^2*(vm_fr*vm_to*sin(va_fr-va_to))) # to + +end + +function function_ohms_yt_from(branch::Dict) + _function_ohms_yt_from(branch) +end + +function PowerModels.constraint_ohms_yt_from(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) + branch = ref(pm, nw, :branch, i) + f_bus = branch["f_bus"] + t_bus = branch["t_bus"] + f_idx = (i, f_bus, t_bus) + + p_fr = var(pm, nw, :p, f_idx) + q_fr = var(pm, nw, :q, f_idx) + vm_fr = var(pm, nw, :vm, f_bus) + vm_to = var(pm, nw, :vm, t_bus) + va_fr = var(pm, nw, :va, f_bus) + va_to = var(pm, nw, :va, t_bus) + + f_owms = function_ohms_yt_from(branch) + f_owms_p, f_owms_q = f_owms(vm_fr, vm_to, va_fr, va_to) + + #constraint_ohms_yt_from(pm, nw, f_bus, t_bus, f_idx, t_idx, g, b, g_fr, b_fr, tr, ti, tm) + JuMP.@constraint(pm.model, p_fr == f_owms_p) # @NL + JuMP.@constraint(pm.model, q_fr == f_owms_q) # @NL +end + +function _function_ohms_yt_to(branch::Dict) + g, b = calc_branch_y(branch) + tr, ti = calc_branch_t(branch) + g_to = branch["g_to"] + b_to = branch["b_to"] + tm = branch["tap"] + + return (vm_fr, vm_to, va_fr, va_to) -> ((g+g_to)*vm_to^2 + (-g*tr-b*ti)/tm^2*(vm_to*vm_fr*cos(va_to-va_fr)) + (-b*tr+g*ti)/tm^2*(vm_to*vm_fr*sin(va_to-va_fr)), # from + -(b+b_to)*vm_to^2 - (-b*tr+g*ti)/tm^2*(vm_to*vm_fr*cos(va_to-va_fr)) + (-g*tr-b*ti)/tm^2*(vm_to*vm_fr*sin(va_to-va_fr))) # to + +end + +function function_ohms_yt_to(branch::Dict) + _function_ohms_yt_to(branch) +end + +function PowerModels.constraint_ohms_yt_to(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) + branch = ref(pm, nw, :branch, i) + t_bus = branch["t_bus"] + f_bus = branch["f_bus"] + t_idx = (i, t_bus, f_bus) + + p_to = var(pm, nw, :p, t_idx) + q_to = var(pm, nw, :q, t_idx) + vm_fr = var(pm, nw, :vm, f_bus) + vm_to = var(pm, nw, :vm, t_bus) + va_fr = var(pm, nw, :va, f_bus) + va_to = var(pm, nw, :va, t_bus) + + f_owms = function_ohms_yt_to(branch) + f_owms_p, f_owms_q = f_owms(vm_fr, vm_to, va_fr, va_to) + + # constraint_ohms_yt_to(pm, 
nw, f_bus, t_bus, f_idx, t_idx, g, b, g_to, b_to, tr, ti, tm) + JuMP.@constraint(pm.model, p_to == f_owms_p) # @NL + JuMP.@constraint(pm.model, q_to == f_owms_q) # @NL +end + +# function function_ohms_y_from(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) +# branch = ref(pm, nw, :branch, i) +# f_bus = branch["f_bus"] +# t_bus = branch["t_bus"] +# f_idx = (i, f_bus, t_bus) +# t_idx = (i, t_bus, f_bus) + +# g, b = calc_branch_y(branch) +# g_fr = branch["g_fr"] +# b_fr = branch["b_fr"] +# tm = branch["tap"] +# ta = branch["shift"] + +# return (vm_fr, vm_to, va_fr, va_to) -> (g+g_fr)*(vm_fr/tm)^2 - g*vm_fr/tm*vm_to*cos(va_fr-va_to-ta) + -b*vm_fr/tm*vm_to*sin(va_fr-va_to-ta), # from +# (vm_fr, vm_to, va_fr, va_to) -> -(b+b_fr)*(vm_fr/tm)^2 + b*vm_fr/tm*vm_to*cos(va_fr-va_to-ta) + -g*vm_fr/tm*vm_to*sin(va_fr-va_to-ta) # to + +# end + +# function constraint_ohms_y_from(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) +# p_fr = var(pm, nw, :p, f_idx) +# q_fr = var(pm, nw, :q, f_idx) +# vm_fr = var(pm, nw, :vm, f_bus) +# vm_to = var(pm, nw, :vm, t_bus) +# va_fr = var(pm, nw, :va, f_bus) +# va_to = var(pm, nw, :va, t_bus) + +# f_owms = function_ohms_y_from(pm, i; nw=nw) + +# # constraint_ohms_y_from(pm, nw, f_bus, t_bus, f_idx, t_idx, g, b, g_fr, b_fr, tm, ta) +# JuMP.constraint(pm.model, p_fr == f_owms[1] # @NL(vm_fr, vm_to, va_fr, va_to)) +# JuMP.constraint(pm.model, q_fr == f_owms[2] # @NL(vm_fr, vm_to, va_fr, va_to)) +# end + +# function function_ohms_y_to(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) +# branch = ref(pm, nw, :branch, i) +# f_bus = branch["f_bus"] +# t_bus = branch["t_bus"] +# f_idx = (i, f_bus, t_bus) +# t_idx = (i, t_bus, f_bus) + +# g, b = calc_branch_y(branch) +# g_to = branch["g_to"] +# b_to = branch["b_to"] +# tm = branch["tap"] +# ta = branch["shift"] + +# return (vm_fr, vm_to, va_fr, va_to) -> (g+g_to)*vm_to^2 - g*vm_to*vm_fr/tm*cos(va_to-va_fr+ta) + -b*vm_to*vm_fr/tm*sin(va_to-va_fr+ta), # from +# (vm_fr, vm_to, va_fr, va_to) -> -(b+b_to)*vm_to^2 + b*vm_to*vm_fr/tm*cos(va_to-va_fr+ta) + -g*vm_to*vm_fr/tm*sin(va_to-va_fr+ta) # to + +# end + +# function constraint_ohms_y_to(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) +# p_to = var(pm, nw, :p, t_idx) +# q_to = var(pm, nw, :q, t_idx) +# vm_fr = var(pm, nw, :vm, f_bus) +# vm_to = var(pm, nw, :vm, t_bus) +# va_fr = var(pm, nw, :va, f_bus) +# va_to = var(pm, nw, :va, t_bus) + +# f_owms = function_ohms_y_to(pm, i; nw=nw) + +# # constraint_ohms_y_to(pm, nw, f_bus, t_bus, f_idx, t_idx, g, b, g_to, b_to, tm, ta) +# JuMP.constraint(pm.model, p_to == f_owms[1] # @NL(vm_fr, vm_to, va_fr, va_to)) +# JuMP.constraint(pm.model, q_to == f_owms[2] # @NL(vm_fr, vm_to, va_fr, va_to)) +# end diff --git a/src/nn_expression.jl b/src/nn_expression.jl new file mode 100644 index 0000000..0ec8869 --- /dev/null +++ b/src/nn_expression.jl @@ -0,0 +1,64 @@ +using JuMP + +import NNlib: relu + +big_M = 1000.0 + +function NNlib.relu(ex::AffExpr) + model = owner_model(ex) + aux = @variable(model, binary = true) + relu_out = @variable(model, lower_bound = 0.0) + @constraint(model, relu_out >= ex) + @constraint(model, relu_out <= ex + big_M * (1 - aux)) + @constraint(model, relu_out <= big_M * aux) + return relu_out +end + +# function build_test_nlp_model() +# model = Model() + +# @variable(model, x[i = 1:2]); + +# @variable(model, y[i = 1:2] >= 0.0); + +# ex1 = sin(x[1]) +# ex2 = cos(x[2]) + +# cons = @NLconstraint(model, ex1 == ex2) + +# @objective(model, Min, sum(x) + sum(y)) + +# return model, cons +# end + +# function 
constraints_nlp_evaluator(model, x) +# d = NLPEvaluator(model) +# MOI.initialize(d, [:ExprGraph]) + +# f = zeros(length(model.nlp_model.constraints)) + +# MOI.eval_constraint(d, f, x) + +# return f +# end + +# model, cons = build_test_nlp_model() + +# constraints_nlp_evaluator(model, [1.0]) + +# ################ + +# flux_model = Chain(Dense(3, 3, relu), Dense(3, 1)) + +# ex = flux_model(x)[1] + +# cons = @constraint(model, ex == 1) + + + +# optimize!(model) + +# println(value.(x)) + +# value(cons) + From e95d0ef2c4b3ba001f9c304344f6827b757fef8e Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 10 Nov 2023 09:43:26 -0500 Subject: [PATCH 02/74] update --- Project.toml | 1 + examples/powermodels/power_flow_dataset.jl | 23 ++++++++++++++-------- src/FullyConnected.jl | 4 ++-- src/L2O.jl | 2 ++ 4 files changed, 20 insertions(+), 10 deletions(-) diff --git a/Project.toml b/Project.toml index e060a16..b32b8a4 100644 --- a/Project.toml +++ b/Project.toml @@ -13,6 +13,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MLJFlux = "094fc8d1-fd35-5302-93ea-dabda2abf845" NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" Nonconvex = "01bcebdf-4d21-426d-b5c4-6132c1619978" +Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2" ParametricOptInterface = "0ce4ce61-57bf-432b-a095-efac525d185e" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index 20a4db5..b455be5 100644 --- a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -8,6 +8,10 @@ using Gurobi using Flux using MLJ using L2O +using JuMP +using CUDA + +include("examples/powermodels/powermodels.jl") matpower_case_name = "pglib_opf_case5_pjm" @@ -27,30 +31,33 @@ vm_fr, vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"]), rand(t_bus["vmin"]:0.00 va_fr, va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), rand(branch["angmin"]:0.0001:branch["angmax"], num_samples) a_diff = va_fr - va_to -using Plots +# using Plots f_owms_val = f_owms.(vm_fr, vm_to, va_fr, va_to) -plt = scatter(a_diff, [i[1] for i in f_owms_val], label="p_fr", xlabel="θ_fr - θ_to", ylabel="flow", legend=:outertopright); -scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr"); +# plt = scatter(a_diff, [i[1] for i in f_owms_val], label="p_fr", xlabel="θ_fr - θ_to", ylabel="flow", legend=:outertopright); +# scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr"); # Define Model model = MultitargetNeuralNetworkRegressor(; builder=FullyConnectedBuilder([32, 64]), rng=123, - epochs=1000, + epochs=10, optimiser=Flux.Optimise.Adam(), + # acceleration=CUDALibs(), ) # Define the machine _vm_fr, _vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"], num_samples), rand(t_bus["vmin"]:0.0001:t_bus["vmax"], num_samples) _va_fr, _va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), rand(branch["angmin"]:0.0001:branch["angmax"], num_samples) -mach = machine(model, [_vm_fr _vm_to _va_fr _va_to], vcat([[i[1] i[2]] for i in f_owms_val]...)) -fit!(mach; verbosity=2, force=true) +X = [_vm_fr _vm_to _va_fr _va_to] +y = [i[1] for i in f_owms_val][:,:] +mach = machine(model, X, y) +fit!(mach; verbosity=2) # Make predictions predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) -scatter!(plt, a_diff, predictions[:,1], label="p_fr_pred"); -scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") +# scatter!(plt, a_diff, 
predictions[:,1], label="p_fr_pred"); +# scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") function function_ohms_yt_from(::Dict) return (vm_fr, vm_to, va_fr, va_to) -> mach.fitresult[1]([vm_fr, vm_to, va_fr, va_to]) diff --git a/src/FullyConnected.jl b/src/FullyConnected.jl index dc73300..4fae4bf 100644 --- a/src/FullyConnected.jl +++ b/src/FullyConnected.jl @@ -47,7 +47,7 @@ function FullyConnected( # Create the output layer connected to the last hidden layer push!(layers, Dense(hidden_sizes[end] + 1, output_size; init=init)) - return FullyConnected(PairwiseFusion(vcat, layers...), pass_through) + return FullyConnected(PairwiseFusion(vcat, layers...), pass_through) |> gpu end mutable struct FullyConnectedBuilder <: MLJFlux.Builder @@ -56,7 +56,7 @@ end function MLJFlux.build(builder::FullyConnectedBuilder, rng, n_in, n_out) init = Flux.glorot_uniform(rng) - return Chain(FullyConnected(n_in, builder.hidden_sizes, n_out; init=init)) + return Chain(FullyConnected(n_in, builder.hidden_sizes, n_out; init=init)) |> gpu end # mutable struct ConvexRegressor <: MLJFlux.MLJFluxDeterministic diff --git a/src/L2O.jl b/src/L2O.jl index e115385..8d49950 100644 --- a/src/L2O.jl +++ b/src/L2O.jl @@ -17,6 +17,7 @@ using Flux using Flux: @functor using Random import MLJFlux.train! +using Optimisers export ArrowFile, CSVFile, @@ -39,5 +40,6 @@ include("arrowrecorder.jl") include("worst_case.jl") include("worst_case_iter.jl") include("FullyConnected.jl") +include("nn_expression.jl") end From 26e9e9b5327bfb4db3b9b67edb0ee7489188a1cd Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 10 Nov 2023 12:09:09 -0500 Subject: [PATCH 03/74] update --- examples/powermodels/power_flow_dataset.jl | 24 +++++++++++++--------- src/FullyConnected.jl | 19 +++++++++++++++++ 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index b455be5..4b48fc8 100644 --- a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -37,24 +37,28 @@ f_owms_val = f_owms.(vm_fr, vm_to, va_fr, va_to) # scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr"); # Define Model -model = MultitargetNeuralNetworkRegressor(; - builder=FullyConnectedBuilder([32, 64]), - rng=123, - epochs=10, - optimiser=Flux.Optimise.Adam(), - # acceleration=CUDALibs(), -) +# model = MultitargetNeuralNetworkRegressor(; +# builder=FullyConnectedBuilder([32, 64]), +# rng=123, +# epochs=10, +# optimiser=Flux.Optimise.Adam(), +# acceleration=CUDALibs(), +# ) # Define the machine _vm_fr, _vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"], num_samples), rand(t_bus["vmin"]:0.0001:t_bus["vmax"], num_samples) _va_fr, _va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), rand(branch["angmin"]:0.0001:branch["angmax"], num_samples) X = [_vm_fr _vm_to _va_fr _va_to] y = [i[1] for i in f_owms_val][:,:] -mach = machine(model, X, y) -fit!(mach; verbosity=2) + +model = FullyConnected(4, [4, 4], 1) +train!(model, optimiser, X', Y') + +# mach = machine(model, X, y) +# fit!(mach; verbosity=2) # Make predictions -predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) +# predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) # scatter!(plt, a_diff, predictions[:,1], label="p_fr_pred"); # scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") diff --git a/src/FullyConnected.jl b/src/FullyConnected.jl index 4fae4bf..cf6c741 
100644 --- a/src/FullyConnected.jl +++ b/src/FullyConnected.jl @@ -117,3 +117,22 @@ function MLJFlux.train!( end return training_loss / n_batches end + +function train!(model, optimiser, X, Y) + X = X |> gpu + Y = Y |> gpu + opt_state = Flux.setup(optimiser, model) + data = Flux.DataLoader((X, Y), + batchsize=32, shuffle=true + ) + for d in data + ∇model, _ = gradient(model, d...) do m, x, y # calculate the gradients + loss(m(x), y) + end; + # insert what ever code you want here that needs gradient + # E.g. logging with TensorBoardLogger.jl as histogram so you can see if it is becoming huge + opt_state, model = Optimisers.update(opt_state, model, ∇model) + # Here you might like to check validation set accuracy, and break out to do early stopping + end + return loss(model(X'), Y') +end From dc50b6369a75e8505519aa27692af4623815d7ec Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Fri, 10 Nov 2023 14:09:19 -0500 Subject: [PATCH 04/74] update --- examples/powermodels/power_flow_dataset.jl | 13 ++++++++++--- src/FullyConnected.jl | 4 ++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index 4b48fc8..9354e40 100644 --- a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -10,6 +10,7 @@ using MLJ using L2O using JuMP using CUDA +using Logging include("examples/powermodels/powermodels.jl") @@ -35,13 +36,13 @@ a_diff = va_fr - va_to f_owms_val = f_owms.(vm_fr, vm_to, va_fr, va_to) # plt = scatter(a_diff, [i[1] for i in f_owms_val], label="p_fr", xlabel="θ_fr - θ_to", ylabel="flow", legend=:outertopright); # scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr"); - +optimiser=Flux.Optimise.Adam() # Define Model # model = MultitargetNeuralNetworkRegressor(; # builder=FullyConnectedBuilder([32, 64]), # rng=123, # epochs=10, -# optimiser=Flux.Optimise.Adam(), +# optimiser=optimiser, # acceleration=CUDALibs(), # ) @@ -51,8 +52,14 @@ _va_fr, _va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), ra X = [_vm_fr _vm_to _va_fr _va_to] y = [i[1] for i in f_owms_val][:,:] +loss = Flux.mse model = FullyConnected(4, [4, 4], 1) -train!(model, optimiser, X', Y') +for ep in 1:100000 + epochloss = train!(model, loss, optimiser, X', y') + if ep % 100 == 0 + @info("Epoch $ep, loss = $epochloss") + end +end # mach = machine(model, X, y) # fit!(mach; verbosity=2) diff --git a/src/FullyConnected.jl b/src/FullyConnected.jl index cf6c741..4b79d46 100644 --- a/src/FullyConnected.jl +++ b/src/FullyConnected.jl @@ -118,7 +118,7 @@ function MLJFlux.train!( return training_loss / n_batches end -function train!(model, optimiser, X, Y) +function train!(model, loss, optimiser, X, Y) X = X |> gpu Y = Y |> gpu opt_state = Flux.setup(optimiser, model) @@ -134,5 +134,5 @@ function train!(model, optimiser, X, Y) opt_state, model = Optimisers.update(opt_state, model, ∇model) # Here you might like to check validation set accuracy, and break out to do early stopping end - return loss(model(X'), Y') + return loss(model(X), Y) end From f516057f29fba5b8212a7a8ccee20f10b20195bf Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Fri, 10 Nov 2023 14:13:16 -0500 Subject: [PATCH 05/74] update --- examples/powermodels/power_flow_dataset.jl | 3 ++- src/FullyConnected.jl | 5 ++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index 9354e40..d4f0adf 100644 --- 
a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -54,8 +54,9 @@ y = [i[1] for i in f_owms_val][:,:] loss = Flux.mse model = FullyConnected(4, [4, 4], 1) +opt_state = Flux.setup(optimiser, model) for ep in 1:100000 - epochloss = train!(model, loss, optimiser, X', y') + epochloss = train!(model, loss, opt_state, X', y') if ep % 100 == 0 @info("Epoch $ep, loss = $epochloss") end diff --git a/src/FullyConnected.jl b/src/FullyConnected.jl index 4b79d46..4a46cfb 100644 --- a/src/FullyConnected.jl +++ b/src/FullyConnected.jl @@ -118,12 +118,11 @@ function MLJFlux.train!( return training_loss / n_batches end -function train!(model, loss, optimiser, X, Y) +function train!(model, loss, opt_state, X, Y; batchsize=32, shuffle=true) X = X |> gpu Y = Y |> gpu - opt_state = Flux.setup(optimiser, model) data = Flux.DataLoader((X, Y), - batchsize=32, shuffle=true + batchsize=batchsize, shuffle=shuffle ) for d in data ∇model, _ = gradient(model, d...) do m, x, y # calculate the gradients From 5562658b323643c444933d1068c354d6b7b703c4 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Fri, 10 Nov 2023 15:24:32 -0500 Subject: [PATCH 06/74] update --- examples/powermodels/power_flow_dataset.jl | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index d4f0adf..cb28430 100644 --- a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -53,12 +53,20 @@ X = [_vm_fr _vm_to _va_fr _va_to] y = [i[1] for i in f_owms_val][:,:] loss = Flux.mse -model = FullyConnected(4, [4, 4], 1) +model = FullyConnected(4, [3], 1) opt_state = Flux.setup(optimiser, model) +best_model = model +best_loss = 1000000 for ep in 1:100000 epochloss = train!(model, loss, opt_state, X', y') if ep % 100 == 0 @info("Epoch $ep, loss = $epochloss") + if epochloss < best_loss + best_loss = epochloss + best_model = deepcopy(model) + else + model = deepcopy(best_model) + end end end From e49435f3f97d51b208ab3868575fe3931b600811 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 10 Nov 2023 15:55:00 -0500 Subject: [PATCH 07/74] update --- examples/powermodels/power_flow_dataset.jl | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index cb28430..71826f2 100644 --- a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -52,6 +52,15 @@ _va_fr, _va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), ra X = [_vm_fr _vm_to _va_fr _va_to] y = [i[1] for i in f_owms_val][:,:] +# mach = machine(model, X, y) +# fit!(mach; verbosity=2) + +# Make predictions +# predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) + +# scatter!(plt, a_diff, predictions[:,1], label="p_fr_pred"); +# scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") + loss = Flux.mse model = FullyConnected(4, [3], 1) opt_state = Flux.setup(optimiser, model) @@ -70,14 +79,6 @@ for ep in 1:100000 end end -# mach = machine(model, X, y) -# fit!(mach; verbosity=2) - -# Make predictions -# predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) - -# scatter!(plt, a_diff, predictions[:,1], label="p_fr_pred"); -# scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") function function_ohms_yt_from(::Dict) return (vm_fr, vm_to, 
va_fr, va_to) -> mach.fitresult[1]([vm_fr, vm_to, va_fr, va_to]) From 2d779e700f259d9743fa5eeada3478eafda54bde Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 10 Nov 2023 15:59:56 -0500 Subject: [PATCH 08/74] update --- examples/powermodels/power_flow_dataset.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index 71826f2..82b9b26 100644 --- a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -66,8 +66,8 @@ model = FullyConnected(4, [3], 1) opt_state = Flux.setup(optimiser, model) best_model = model best_loss = 1000000 -for ep in 1:100000 - epochloss = train!(model, loss, opt_state, X', y') +for ep in 1:10 + epochloss = L2O.train!(model, loss, opt_state, X', y') if ep % 100 == 0 @info("Epoch $ep, loss = $epochloss") if epochloss < best_loss From 5a43f5fc5506127961d4333c70ae17ae4414289e Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Mon, 11 Dec 2023 15:37:46 -0500 Subject: [PATCH 09/74] update --- examples/powermodels/power_flow_dataset.jl | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index 82b9b26..042f529 100644 --- a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -18,7 +18,7 @@ matpower_case_name = "pglib_opf_case5_pjm" network_data = make_basic_network(pglib(matpower_case_name)) -branch = network_data["branch"]["1"] +branch = network_data["branch"]["2"] f_bus_index = branch["f_bus"] f_bus = network_data["bus"]["$f_bus_index"] t_bus_index = branch["t_bus"] @@ -32,10 +32,10 @@ vm_fr, vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"]), rand(t_bus["vmin"]:0.00 va_fr, va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), rand(branch["angmin"]:0.0001:branch["angmax"], num_samples) a_diff = va_fr - va_to -# using Plots +using Plots f_owms_val = f_owms.(vm_fr, vm_to, va_fr, va_to) -# plt = scatter(a_diff, [i[1] for i in f_owms_val], label="p_fr", xlabel="θ_fr - θ_to", ylabel="flow", legend=:outertopright); -# scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr"); +plt = scatter(a_diff, [i[1] for i in f_owms_val], label="p_fr", xlabel="θ_fr - θ_to", ylabel="flow", legend=:outertopright); +scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr") optimiser=Flux.Optimise.Adam() # Define Model # model = MultitargetNeuralNetworkRegressor(; @@ -62,7 +62,7 @@ y = [i[1] for i in f_owms_val][:,:] # scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") loss = Flux.mse -model = FullyConnected(4, [3], 1) +model = FullyConnected(4, [3], 2) opt_state = Flux.setup(optimiser, model) best_model = model best_loss = 1000000 @@ -81,11 +81,11 @@ end function function_ohms_yt_from(::Dict) - return (vm_fr, vm_to, va_fr, va_to) -> mach.fitresult[1]([vm_fr, vm_to, va_fr, va_to]) + return (vm_fr, vm_to, va_fr, va_to) -> model([vm_fr, vm_to, va_fr, va_to]) end function function_ohms_yt_to(branch::Dict) - return (vm_fr, vm_to, va_fr, va_to) -> mach.fitresult[1]([vm_fr, vm_to, va_fr, va_to]) + return (vm_fr, vm_to, va_fr, va_to) -> model([vm_fr, vm_to, va_fr, va_to]) end pm = instantiate_model( From 47cde3621ac57bbbbb9d778615b9eadc26a73f25 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Wed, 3 Jan 2024 16:29:02 -0500 Subject: [PATCH 10/74] update --- Project.toml | 2 +- examples/powermodels/unitcommitment.jl | 208 
+++++++++++++++++++++++++ src/intdecomposition.jl | 132 ++++++++++++++++ 3 files changed, 341 insertions(+), 1 deletion(-) create mode 100644 examples/powermodels/unitcommitment.jl create mode 100644 src/intdecomposition.jl diff --git a/Project.toml b/Project.toml index b32b8a4..cec6647 100644 --- a/Project.toml +++ b/Project.toml @@ -24,7 +24,7 @@ Arrow = "2" CSV = "0.10" Dualization = "0.5" JuMP = "1" -ParametricOptInterface = "0.5" +ParametricOptInterface = "0.7" julia = "1.6" [extras] diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl new file mode 100644 index 0000000..83fe030 --- /dev/null +++ b/examples/powermodels/unitcommitment.jl @@ -0,0 +1,208 @@ +using TestEnv +TestEnv.activate() + +using PowerModels +using PGLib +using Gurobi +using Ipopt +using JuMP +using Juniper +using ParametricOptInterface +const POI = ParametricOptInterface +using LinearAlgebra +using Logging + +# "pglib_opf_case118_ieee.m" #"pglib_opf_case5_pjm.m" # "300_ieee" +matpower_case_name = "300_ieee" + +network_data = make_basic_network(pglib(matpower_case_name)) + +network_formulation = DCPPowerModel # DCPPowerModel ACPPowerModel + +unique([g["pmin"] for g in values(network_data["gen"])]) + +########################################## +## Branch and Bound +########################################## + +ipopt = optimizer_with_attributes(Ipopt.Optimizer, "print_level"=>0) +# optimizer = optimizer_with_attributes(Juniper.Optimizer, "nl_solver"=>ipopt) + +gurobi_optimizer = Gurobi.Optimizer +jump_model = Model(gurobi_optimizer) + +# Create commit variables +@variable(jump_model, u[i in 1:length(network_data["gen"])] , Bin) + +# Create power flow constraints +pm = PowerModels.instantiate_model(network_data, network_formulation, PowerModels.build_opf; jump_model=jump_model) + +# link generation and commit +pg = PowerModels.var(pm, :pg) +@constraint(jump_model, + pmin[i in 1:length(network_data["gen"])], pg[i] >= u[i] * network_data["gen"]["$i"]["pmax"] * 0.4 +) + +@constraint(jump_model, + pmax[i in 1:length(network_data["gen"])], pg[i] <= u[i] * network_data["gen"]["$i"]["pmax"] +) + +JuMP.optimize!(jump_model) + +termination_status(jump_model) +objective_value(jump_model) + +########################################## +## Cutting Planes +########################################## + +# Set the lower problem + +# inner problem +poi_ipopt = () -> POI.Optimizer(Ipopt.Optimizer()) +# set_optimizer_attribute(poi_ipopt, "print_level", 0) +inner_model = Model(poi_ipopt) + +# Create commit parameters +start_values = zeros(length(network_data["gen"])) +@variable(inner_model, u_inner[i= 1:length(network_data["gen"])] in MOI.Parameter.(start_values)) + +# slack variables +@variable(inner_model, slack[i= 1:length(network_data["gen"])] >= 0) + +# Create power flow constraints +pm = PowerModels.instantiate_model(network_data, network_formulation, PowerModels.build_opf; jump_model=inner_model) + +# link generation and commit +pg = PowerModels.var(pm, :pg) +@constraint(inner_model, + pmin[i in 1:length(network_data["gen"])], pg[i] + slack[i] >= u_inner[i] * network_data["gen"]["$i"]["pmax"] * 0.4 +) + +@constraint(inner_model, + pmax[i in 1:length(network_data["gen"])], pg[i] - slack[i] <= u_inner[i] * network_data["gen"]["$i"]["pmax"] +) + +obj = objective_function(inner_model) +obj = obj + sum(slack) * 1e7 +set_objective(inner_model, MIN_SENSE, obj) + +JuMP.optimize!(inner_model) + +termination_status(inner_model) +primal_status(inner_model) +obj_value = objective_value(inner_model) +duals 
= [MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner] + +# cut list +cuts_intercept = [obj_value] +cuts_slope = [duals] +cuts_point = [start_values] + +gurobi_optimizer = optimizer_with_attributes(Gurobi.Optimizer, "OutputFlag" => 0) +upper_model = Model(gurobi_optimizer) +# Create commit variables +@variable(upper_model, u[i in 1:length(network_data["gen"])] , Bin) + +# cutting planes epigraph variable +bound = -1e7 +@variable(upper_model, θ >= bound) + +# minimize the epigraph variable +@objective(upper_model, Min, θ) + +max_iter = 1000 +i = 1 +gap = Array{Float64}(undef, max_iter) +upper_bound = Array{Float64}(undef, max_iter) +lower_bound = Array{Float64}(undef, max_iter) +while i <= max_iter + # Add cuts + @constraint(upper_model, + θ >= cuts_intercept[i] + dot(cuts_slope[i], u .- cuts_point[i]) + ) + + JuMP.optimize!(upper_model) + + # Add point to the lists + if termination_status(upper_model) == MOI.OPTIMAL + push!(cuts_point, value.(u)) + bound = objective_value(upper_model) + else + println("Upper problem failed") + break; + end + + # run inner problem + MOI.set.(inner_model, POI.ParameterValue(), u_inner, cuts_point[i+1]) + JuMP.optimize!(inner_model) + + # Add cut to the lists + if termination_status(inner_model) == MOI.OPTIMAL || termination_status(inner_model) == MOI.LOCALLY_SOLVED + push!(cuts_intercept, objective_value(inner_model)) + push!(cuts_slope, [MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner]) + else + println("Inner problem failed") + break; + end + + # test convergence + u_bound = minimum(cuts_intercept) + upper_bound[i] = u_bound + lower_bound[i] = bound + gap[i] = abs(bound - u_bound) / u_bound + if i > 10 && gap[i] < 0.1 && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) + println("Converged") + break; + else + @info "Iteration $i" bound cuts_intercept[i] + end + i += 1 +end +i = ifelse(i >= max_iter, max_iter, i) + +gap = gap[1:i] +upper_bound = upper_bound[1:i] +lower_bound = lower_bound[1:i] + +using Plots + +# Plot upper and lower bounds +plt = plot(2:i, upper_bound[2:i], label="Upper bound", title="Case $matpower_case_name", xlabel="Iteration", ylabel="Objective value"); +plot!(plt, 2:i, lower_bound[2:i], label="Lower bound") + + +########### + +using HiGHS +using JuMP +using UnitCommitment + +import UnitCommitment: + Formulation, + KnuOstWat2018, + MorLatRam2013, + ShiftFactorsFormulation + +# Read benchmark instance +instance = UnitCommitment.read_benchmark( + "matpower/case118/2017-02-01", +) + + +# Construct model (using state-of-the-art defaults) +model = UnitCommitment.build_model( + instance = instance, + optimizer = HiGHS.Optimizer, +) + +binary_model, var_mapping, cons_mapping = copy_binary_model(model) +delete_binary_terms!(model) + +for var in values(var_mapping) + JuMP.fix( + var, + 1.0, + force=true, + ) +end diff --git a/src/intdecomposition.jl b/src/intdecomposition.jl new file mode 100644 index 0000000..33d932b --- /dev/null +++ b/src/intdecomposition.jl @@ -0,0 +1,132 @@ +using JuMP + +# # Create a JuMP model object +# m = Model() +# M = 1000 +# # Create binary variables +# @variable(m, x[1:2], Bin) +# # Create linear variable +# @variable(m, 0.0 <= y[1:2] <= 10.0) +# # Create linear constraint +# @constraint(m, 5 * y[1] + 3 * y[2] <= 1) +# # Create binary constraint +# @constraint(m, x[1] + x[2] <= 1) +# # Create big-M constraint +# @constraint(m, y[1] <= M * x[1]) +# @constraint(m, y[2] <= M * x[2]) +# # Create objective function +# @objective(m, Max, 0.8 * x[1] + 0.5 * x[2] + y[1] + y[2]) 
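+# The helpers below split a MIP into its purely binary part and the rest:
+# `copy_binary_model` clones the binary variables, the constraints that touch
+# only binary variables, and the binary objective terms into a fresh model,
+# while `delete_binary_terms!` strips those pieces from the original model.
+# A minimal usage sketch (assuming the commented-out model `m` above has been
+# built; `var_mapping` maps each original binary to its copy):
+#
+# m_bin, var_mapping, cons_mapping = copy_binary_model(m)
+# delete_binary_terms!(m)  # `m` now keeps only the continuous part
+# for var in values(var_mapping)
+#     JuMP.fix(var, 1.0, force = true)  # e.g. evaluate the all-ones assignment
+# end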
+ + +""" + all_binary_variables(m::Model) + +Return a list of all binary variables in the model `m`. +""" +function all_binary_variables(m::Model) + return all_variables(m)[is_binary.(all_variables(m))] +end + +variables(exp::VariableRef) = [exp] +variables(exp::AffExpr) = [var for var in keys(exp.terms)] + +function variables(con::ConstraintRef) + con_exp = constraint_object(con).func + return variables(con_exp) +end + +""" + all_binary_constraints(m::Model) + +Return a list of all constraints in the model `m` +containing only binary variables. +""" +function all_binary_constraints(m::Model) + all_cons_types = list_of_constraint_types(m) + consrefs = [] + for con_type in all_cons_types + cons = all_constraints(m, con_type...) + for con in cons + if all([is_binary(var) for var in variables(con)]) + push!(consrefs, con) + end + end + end + return consrefs +end + +# Copy variables +function copy_binary_variables(m_to::Model, m_from::Model) + mapping = Dict() + for var in all_binary_variables(m_from) + ref_to = @variable(m_to, base_name = name(var), binary = true) + mapping[var] = ref_to + end + return mapping +end + +# Copy constraints +function copy_binary_constraint(::Model, ::VariableRef, set, var_mapping) + return nothing +end +function copy_binary_constraint(m_to::Model, func::AffExpr, set, var_mapping) + terms_dict = JuMP.OrderedCollections.OrderedDict{VariableRef, Float64}() + for var in keys(func.terms) + terms_dict[var_mapping[var]] = func.terms[var] + end + exp = GenericAffExpr(func.constant, terms_dict) + return @constraint(m_to, exp in set) +end + +function copy_binary_constraints(m_to::Model, m_from::Model, var_mapping::Dict) + cons_to = [] + for con in all_binary_constraints(m_from) + con_exp = constraint_object(con) + ref_to = copy_binary_constraint(m_to, con_exp.func, con_exp.set, var_mapping) + push!(cons_to, ref_to) + end + return cons_to +end + +# Copy objective function +function copy_binary_objective(m_to::Model, m_from::Model, var_mapping::Dict) + obj = objective_function(m_from) + obj2_dict = JuMP.OrderedCollections.OrderedDict{VariableRef, Float64}() + for var in keys(obj.terms) + if is_binary(var) + obj2_dict[var_mapping[var]] = obj.terms[var] + end + end + obj2 = GenericAffExpr(obj.constant, obj2_dict) + @objective(m_to, objective_sense(m_from), obj2) +end + +# Copy binary model +function copy_binary_model(m_from::Model) + m_to = Model() + var_mapping = copy_binary_variables(m_to, m_from) + cons_mapping = copy_binary_constraints(m_to, m_from, var_mapping) + copy_binary_objective(m_to, m_from, var_mapping) + return m_to, var_mapping, cons_mapping +end + +# remove binary terms +function delete_binary_terms!(m::Model) + obj = objective_function(m) + for var in keys(obj.terms) + if is_binary(var) + println("deleting $var") + delete!(obj.terms, var) + end + end + obj.constant = 0.0 + set_objective_function(m, obj) + + # remove binary constraints from the original model + for con in all_binary_constraints(m) + delete(m, con) + end +end + +m2, var_mapping, cons_mapping = copy_binary_model(m) +delete_binary_terms!(m) From 39f0922ea525b7929d15b3399aedab6dee6f01a3 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Thu, 4 Jan 2024 13:13:05 -0500 Subject: [PATCH 11/74] running code --- examples/powermodels/unitcommitment.jl | 161 +++++++++-------- src/cutting_planes.jl | 229 +++++++++++++++++++++++++ src/intdecomposition.jl | 132 -------------- 3 files changed, 314 insertions(+), 208 deletions(-) create mode 100644 src/cutting_planes.jl delete mode 100644 
src/intdecomposition.jl diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl index 83fe030..3f8607d 100644 --- a/examples/powermodels/unitcommitment.jl +++ b/examples/powermodels/unitcommitment.jl @@ -94,72 +94,72 @@ primal_status(inner_model) obj_value = objective_value(inner_model) duals = [MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner] -# cut list -cuts_intercept = [obj_value] -cuts_slope = [duals] -cuts_point = [start_values] - -gurobi_optimizer = optimizer_with_attributes(Gurobi.Optimizer, "OutputFlag" => 0) -upper_model = Model(gurobi_optimizer) -# Create commit variables -@variable(upper_model, u[i in 1:length(network_data["gen"])] , Bin) - -# cutting planes epigraph variable -bound = -1e7 -@variable(upper_model, θ >= bound) - -# minimize the epigraph variable -@objective(upper_model, Min, θ) - -max_iter = 1000 -i = 1 -gap = Array{Float64}(undef, max_iter) -upper_bound = Array{Float64}(undef, max_iter) -lower_bound = Array{Float64}(undef, max_iter) -while i <= max_iter - # Add cuts - @constraint(upper_model, - θ >= cuts_intercept[i] + dot(cuts_slope[i], u .- cuts_point[i]) - ) - - JuMP.optimize!(upper_model) - - # Add point to the lists - if termination_status(upper_model) == MOI.OPTIMAL - push!(cuts_point, value.(u)) - bound = objective_value(upper_model) - else - println("Upper problem failed") - break; - end - - # run inner problem - MOI.set.(inner_model, POI.ParameterValue(), u_inner, cuts_point[i+1]) - JuMP.optimize!(inner_model) - - # Add cut to the lists - if termination_status(inner_model) == MOI.OPTIMAL || termination_status(inner_model) == MOI.LOCALLY_SOLVED - push!(cuts_intercept, objective_value(inner_model)) - push!(cuts_slope, [MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner]) - else - println("Inner problem failed") - break; - end - - # test convergence - u_bound = minimum(cuts_intercept) - upper_bound[i] = u_bound - lower_bound[i] = bound - gap[i] = abs(bound - u_bound) / u_bound - if i > 10 && gap[i] < 0.1 && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) - println("Converged") - break; - else - @info "Iteration $i" bound cuts_intercept[i] - end - i += 1 -end -i = ifelse(i >= max_iter, max_iter, i) +# # cut list +# cuts_intercept = [obj_value] +# cuts_slope = [duals] +# cuts_point = [start_values] + +# gurobi_optimizer = optimizer_with_attributes(Gurobi.Optimizer, "OutputFlag" => 0) +# upper_model = Model(gurobi_optimizer) +# # Create commit variables +# @variable(upper_model, u[i in 1:length(network_data["gen"])] , Bin) + +# # cutting planes epigraph variable +# bound = -1e7 +# @variable(upper_model, θ >= bound) + +# # minimize the epigraph variable +# @objective(upper_model, Min, θ) + +# max_iter = 1000 +# i = 1 +# gap = Array{Float64}(undef, max_iter) +# upper_bound = Array{Float64}(undef, max_iter) +# lower_bound = Array{Float64}(undef, max_iter) +# while i <= max_iter +# # Add cuts +# @constraint(upper_model, +# θ >= cuts_intercept[i] + dot(cuts_slope[i], u .- cuts_point[i]) +# ) + +# JuMP.optimize!(upper_model) + +# # Add point to the lists +# if termination_status(upper_model) == MOI.OPTIMAL +# push!(cuts_point, value.(u)) +# bound = objective_value(upper_model) +# else +# println("Upper problem failed") +# break; +# end + +# # run inner problem +# MOI.set.(inner_model, POI.ParameterValue(), u_inner, cuts_point[i+1]) +# JuMP.optimize!(inner_model) + +# # Add cut to the lists +# if termination_status(inner_model) == MOI.OPTIMAL || termination_status(inner_model) == 
MOI.LOCALLY_SOLVED +# push!(cuts_intercept, objective_value(inner_model)) +# push!(cuts_slope, [MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner]) +# else +# println("Inner problem failed") +# break; +# end + +# # test convergence +# u_bound = minimum(cuts_intercept) +# upper_bound[i] = u_bound +# lower_bound[i] = bound +# gap[i] = abs(bound - u_bound) / u_bound +# if i > 10 && gap[i] < 0.1 && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) +# println("Converged") +# break; +# else +# @info "Iteration $i" bound cuts_intercept[i] +# end +# i += 1 +# end +# i = ifelse(i >= max_iter, max_iter, i) gap = gap[1:i] upper_bound = upper_bound[1:i] @@ -174,9 +174,22 @@ plot!(plt, 2:i, lower_bound[2:i], label="Lower bound") ########### +# function add_deficit!(model; penalty=1e7) +# @variable(model, deficit[1:length(model[:eq_power_balance])]) +# @variable(model, norm_deficit) +# for (i, eq) in model[:eq_power_balance] +# set_normalized_coefficient(eq, deficit[i], 1) +# end +# @constraint(model, [norm_deficit; deficit] in MOI.NormOneCone(1 + length(deficit))) +# set_objective_coefficient(model, norm_deficit, penalty) +# return norm_deficit +# end + using HiGHS using JuMP using UnitCommitment +import ParametricOptInterface as POI +using LinearAlgebra import UnitCommitment: Formulation, @@ -189,20 +202,16 @@ instance = UnitCommitment.read_benchmark( "matpower/case118/2017-02-01", ) +inner_solver = () -> POI.Optimizer(HiGHS.Optimizer()) +upper_solver = HiGHS.Optimizer # Construct model (using state-of-the-art defaults) model = UnitCommitment.build_model( instance = instance, - optimizer = HiGHS.Optimizer, + optimizer = inner_solver, ) -binary_model, var_mapping, cons_mapping = copy_binary_model(model) -delete_binary_terms!(model) +# Solve model using cutting plane algorithm +include("src/cutting_planes.jl") -for var in values(var_mapping) - JuMP.fix( - var, - 1.0, - force=true, - ) -end +upper_model, lower_bound, upper_bound, gap = cutting_planes!(model; upper_solver, inner_solver) \ No newline at end of file diff --git a/src/cutting_planes.jl b/src/cutting_planes.jl new file mode 100644 index 0000000..f2bfc28 --- /dev/null +++ b/src/cutting_planes.jl @@ -0,0 +1,229 @@ +using JuMP + +""" + all_binary_variables(m::Model) + +Return a list of all binary variables in the model `m`. +""" +function all_binary_variables(m::Model) + return all_variables(m)[is_binary.(all_variables(m))] +end + +variables(exp::VariableRef) = [exp] +variables(exp::Vector{VariableRef}) = exp +variables(exp::AffExpr) = [var for var in keys(exp.terms)] + +function variables(con::ConstraintRef) + con_exp = constraint_object(con).func + return variables(con_exp) +end + +""" + all_binary_constraints(m::Model) + +Return a list of all constraints in the model `m` +containing only binary variables. +""" +function all_binary_constraints(m::Model) + all_cons_types = list_of_constraint_types(m) + consrefs = [] + for con_type in all_cons_types + cons = all_constraints(m, con_type...) 
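+        # keep only the constraints whose every referenced variable is binary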
+        for con in cons
+            if all([is_binary(var) for var in variables(con)])
+                push!(consrefs, con)
+            end
+        end
+    end
+    return consrefs
+end
+
+# Copy variables
+function copy_binary_variables(m_to::Model, m_from::Model)
+    mapping = Dict()
+    for var in all_binary_variables(m_from)
+        ref_to = @variable(m_to, base_name = name(var), binary = true)
+        mapping[var] = ref_to
+    end
+    return mapping
+end
+
+# Copy constraints
+function copy_binary_constraint(::Model, ::VariableRef, set, var_mapping)
+    return nothing
+end
+function copy_binary_constraint(m_to::Model, func::AffExpr, set, var_mapping)
+    terms_dict = JuMP.OrderedCollections.OrderedDict{VariableRef, Float64}()
+    for var in keys(func.terms)
+        terms_dict[var_mapping[var]] = func.terms[var]
+    end
+    exp = GenericAffExpr(func.constant, terms_dict)
+    return @constraint(m_to, exp in set)
+end
+
+function copy_binary_constraints(m_to::Model, m_from::Model, var_mapping::Dict)
+    cons_to = []
+    for con in all_binary_constraints(m_from)
+        con_exp = constraint_object(con)
+        ref_to = copy_binary_constraint(m_to, con_exp.func, con_exp.set, var_mapping)
+        push!(cons_to, ref_to)
+    end
+    return cons_to
+end
+
+# Copy objective function
+function copy_binary_objective(m_to::Model, m_from::Model, var_mapping::Dict)
+    obj = objective_function(m_from)
+    obj2_dict = JuMP.OrderedCollections.OrderedDict{VariableRef, Float64}()
+    for var in keys(obj.terms)
+        if is_binary(var)
+            obj2_dict[var_mapping[var]] = obj.terms[var]
+        end
+    end
+    obj2 = GenericAffExpr(obj.constant, obj2_dict)
+    @objective(m_to, objective_sense(m_from), obj2)
+end
+
+# Copy binary model
+function copy_binary_model(m_from::Model)
+    m_to = Model()
+    var_mapping = copy_binary_variables(m_to, m_from)
+    cons_mapping = copy_binary_constraints(m_to, m_from, var_mapping)
+    copy_binary_objective(m_to, m_from, var_mapping)
+    return m_to, var_mapping, cons_mapping
+end
+
+# remove binary terms
+function delete_binary_terms!(m::Model)
+    obj = objective_function(m)
+    for var in keys(obj.terms)
+        if is_binary(var)
+            println("deleting $var")
+            delete!(obj.terms, var)
+        end
+    end
+    obj.constant = 0.0
+    set_objective_function(m, obj)
+
+    # remove binary constraints from the original model
+    for con in all_binary_constraints(m)
+        delete(m, con)
+    end
+end
+
+function add_deficit_constraints!(model::Model; penalty=1e7)
+    consrefs = [con for con in all_constraints(model, include_variable_in_set_constraints=false)]
+    @variable(model, deficit[1:length(consrefs)])
+    @variable(model, norm_deficit)
+    for (i, eq) in enumerate(consrefs)
+        set_normalized_coefficient(eq, deficit[i], 1)
+    end
+    @constraint(model, [norm_deficit; deficit] in MOI.NormOneCone(1 + length(deficit)))
+    set_objective_coefficient(model, norm_deficit, penalty)
+    return norm_deficit
+end
+
+# fix binary variables to POI parameters
+function fix_binary_variables!(inner_model::Model, inner_2_upper_map::Dict)
+    var_mapping = Dict()
+    for (to_var, from_var) in inner_2_upper_map
+        param = @variable(inner_model, set = MOI.Parameter(0.0))
+        @constraint(inner_model, to_var == param)
+        set_upper_bound(to_var, 1.1)
+        set_lower_bound(to_var, -0.1)
+        var_mapping[from_var] = param
+    end
+    # QUESTION: DOES values() and keys() return the same order?
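+    # Answer: yes. For a given AbstractDict, `keys` and `values` iterate in
+    # matching order, so pairing them positionally is safe.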
+ return var_mapping +end + +# set binary variables +function set_binary_variables!(inner_model::Model, var_mapping::Dict, vals) + for (i, to_var) in enumerate(values(var_mapping)) + MOI.set(inner_model, POI.ParameterValue(), to_var, vals[i]) + end +end + +# add cut +function add_cut!(upper_model::Model, cut_intercept, cut_slope, cut_point, u) + @constraint(upper_model, + upper_model[:θ] >= cut_intercept + dot(cut_slope, u .- cut_point) + ) +end + +function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_iter::Int=1000) + upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) + delete_binary_terms!(inner_model) + add_deficit_constraints!(inner_model) + upper_2_inner = fix_binary_variables!(inner_model, inner_2_upper_map) + u = keys(upper_2_inner) + u_inner = values(upper_2_inner) + set_optimizer(inner_model, inner_solver) + set_optimizer(upper_model, upper_solver) + + # cut list + JuMP.optimize!(inner_model) + cuts_intercept = [objective_value(inner_model)] + cuts_slope = [[MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner]] + cuts_point = [zeros(length(upper_2_inner))] + + # cutting planes epigraph variable + bound = -1e7 + @variable(upper_model, θ >= bound) + obj_upper = objective_function(upper_model) + @objective(upper_model, Min, obj_upper + θ) + + # loop + i = 1 + gap = Array{Float64}(undef, max_iter) + upper_bound = Array{Float64}(undef, max_iter) + lower_bound = Array{Float64}(undef, max_iter) + while i <= max_iter + # Add cuts + add_cut!(upper_model, cuts_intercept[i], cuts_slope[i], cuts_point[i], u) + + JuMP.optimize!(upper_model) + + # Add point to the lists + if termination_status(upper_model) == MOI.OPTIMAL + push!(cuts_point, value.(keys(upper_2_inner))) + bound = objective_value(upper_model) + else + println("Upper problem failed") + break; + end + + # run inner problem + set_binary_variables!(inner_model, upper_2_inner, cuts_point[i+1]) + JuMP.optimize!(inner_model) + + # Add cut to the lists + if termination_status(inner_model) == MOI.OPTIMAL || termination_status(inner_model) == MOI.LOCALLY_SOLVED + push!(cuts_intercept, objective_value(inner_model)) + push!(cuts_slope, [MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner]) + else + println("Inner problem failed") + break; + end + + # test convergence + u_bound = minimum(cuts_intercept) + upper_bound[i] = u_bound + lower_bound[i] = bound + gap[i] = abs(bound - u_bound) / u_bound + if i > 10 && gap[i] < 0.1 && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) + println("Converged") + break; + else + @info "Iteration $i" bound cuts_intercept[i] + end + i += 1 + end + i = ifelse(i >= max_iter, max_iter, i) + + gap = gap[1:i] + upper_bound = upper_bound[1:i] + lower_bound = lower_bound[1:i] + return upper_model, lower_bound, upper_bound, gap +end + diff --git a/src/intdecomposition.jl b/src/intdecomposition.jl deleted file mode 100644 index 33d932b..0000000 --- a/src/intdecomposition.jl +++ /dev/null @@ -1,132 +0,0 @@ -using JuMP - -# # Create a JuMP model object -# m = Model() -# M = 1000 -# # Create binary variables -# @variable(m, x[1:2], Bin) -# # Create linear variable -# @variable(m, 0.0 <= y[1:2] <= 10.0) -# # Create linear constraint -# @constraint(m, 5 * y[1] + 3 * y[2] <= 1) -# # Create binary constraint -# @constraint(m, x[1] + x[2] <= 1) -# # Create big-M constraint -# @constraint(m, y[1] <= M * x[1]) -# @constraint(m, y[2] <= M * x[2]) -# # Create objective function -# @objective(m, Max, 0.8 * x[1] + 0.5 * x[2] + y[1] + y[2]) - 
- -""" - all_binary_variables(m::Model) - -Return a list of all binary variables in the model `m`. -""" -function all_binary_variables(m::Model) - return all_variables(m)[is_binary.(all_variables(m))] -end - -variables(exp::VariableRef) = [exp] -variables(exp::AffExpr) = [var for var in keys(exp.terms)] - -function variables(con::ConstraintRef) - con_exp = constraint_object(con).func - return variables(con_exp) -end - -""" - all_binary_constraints(m::Model) - -Return a list of all constraints in the model `m` -containing only binary variables. -""" -function all_binary_constraints(m::Model) - all_cons_types = list_of_constraint_types(m) - consrefs = [] - for con_type in all_cons_types - cons = all_constraints(m, con_type...) - for con in cons - if all([is_binary(var) for var in variables(con)]) - push!(consrefs, con) - end - end - end - return consrefs -end - -# Copy variables -function copy_binary_variables(m_to::Model, m_from::Model) - mapping = Dict() - for var in all_binary_variables(m_from) - ref_to = @variable(m_to, base_name = name(var), binary = true) - mapping[var] = ref_to - end - return mapping -end - -# Copy constraints -function copy_binary_constraint(::Model, ::VariableRef, set, var_mapping) - return nothing -end -function copy_binary_constraint(m_to::Model, func::AffExpr, set, var_mapping) - terms_dict = JuMP.OrderedCollections.OrderedDict{VariableRef, Float64}() - for var in keys(func.terms) - terms_dict[var_mapping[var]] = func.terms[var] - end - exp = GenericAffExpr(func.constant, terms_dict) - return @constraint(m_to, exp in set) -end - -function copy_binary_constraints(m_to::Model, m_from::Model, var_mapping::Dict) - cons_to = [] - for con in all_binary_constraints(m_from) - con_exp = constraint_object(con) - ref_to = copy_binary_constraint(m_to, con_exp.func, con_exp.set, var_mapping) - push!(cons_to, ref_to) - end - return cons_to -end - -# Copy objective function -function copy_binary_objective(m_to::Model, m_from::Model, var_mapping::Dict) - obj = objective_function(m_from) - obj2_dict = JuMP.OrderedCollections.OrderedDict{VariableRef, Float64}() - for var in keys(obj.terms) - if is_binary(var) - obj2_dict[var_mapping[var]] = obj.terms[var] - end - end - obj2 = GenericAffExpr(obj.constant, obj2_dict) - @objective(m_to, objective_sense(m_from), obj2) -end - -# Copy binary model -function copy_binary_model(m_from::Model) - m_to = Model() - var_mapping = copy_binary_variables(m_to, m_from) - cons_mapping = copy_binary_constraints(m_to, m_from, var_mapping) - copy_binary_objective(m_to, m_from, var_mapping) - return m_to, var_mapping, cons_mapping -end - -# remove binary terms -function delete_binary_terms!(m::Model) - obj = objective_function(m) - for var in keys(obj.terms) - if is_binary(var) - println("deleting $var") - delete!(obj.terms, var) - end - end - obj.constant = 0.0 - set_objective_function(m, obj) - - # remove binary constraints from the original model - for con in all_binary_constraints(m) - delete(m, con) - end -end - -m2, var_mapping, cons_mapping = copy_binary_model(m) -delete_binary_terms!(m) From 0831e99e57203176e8df2dacdc119edf277fbe10 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Thu, 4 Jan 2024 16:24:30 -0500 Subject: [PATCH 12/74] code working --- examples/powermodels/unitcommitment.jl | 4 +-- src/cutting_planes.jl | 36 ++++++++++++++------------ 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl index 3f8607d..81de4ef 100644 --- 
a/examples/powermodels/unitcommitment.jl +++ b/examples/powermodels/unitcommitment.jl @@ -199,9 +199,9 @@ import UnitCommitment: # Read benchmark instance instance = UnitCommitment.read_benchmark( - "matpower/case118/2017-02-01", + "matpower/case14/2017-01-01", ) - +# instance.time = 4 inner_solver = () -> POI.Optimizer(HiGHS.Optimizer()) upper_solver = HiGHS.Optimizer diff --git a/src/cutting_planes.jl b/src/cutting_planes.jl index f2bfc28..ad579fa 100644 --- a/src/cutting_planes.jl +++ b/src/cutting_planes.jl @@ -138,8 +138,8 @@ function fix_binary_variables!(inner_model::Model, inner_2_upper_map::Dict) end # set binary variables -function set_binary_variables!(inner_model::Model, var_mapping::Dict, vals) - for (i, to_var) in enumerate(values(var_mapping)) +function set_binary_variables!(inner_model::Model, u_inner, vals) + for (i, to_var) in enumerate(u_inner) MOI.set(inner_model, POI.ParameterValue(), to_var, vals[i]) end end @@ -151,7 +151,16 @@ function add_cut!(upper_model::Model, cut_intercept, cut_slope, cut_point, u) ) end -function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_iter::Int=1000) +function solve_or_shuffle!(model::Model, u, cuts_point) + JuMP.optimize!(model) + new_point = rand([0;1],length(u)) + if termination_status(model) != MOI.OPTIMAL || isapprox(new_point, cuts_point[end]) + return rand([0;1],length(u)) + end + return new_point +end + +function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_iter::Int=4000, atol::Real=0.1, bound::Real=0.0) upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) delete_binary_terms!(inner_model) add_deficit_constraints!(inner_model) @@ -165,10 +174,9 @@ function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_ite JuMP.optimize!(inner_model) cuts_intercept = [objective_value(inner_model)] cuts_slope = [[MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner]] - cuts_point = [zeros(length(upper_2_inner))] + cuts_point = [rand([0;1],length(upper_2_inner))] # cutting planes epigraph variable - bound = -1e7 @variable(upper_model, θ >= bound) obj_upper = objective_function(upper_model) @objective(upper_model, Min, obj_upper + θ) @@ -182,19 +190,15 @@ function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_ite # Add cuts add_cut!(upper_model, cuts_intercept[i], cuts_slope[i], cuts_point[i], u) - JuMP.optimize!(upper_model) - # Add point to the lists - if termination_status(upper_model) == MOI.OPTIMAL - push!(cuts_point, value.(keys(upper_2_inner))) - bound = objective_value(upper_model) - else - println("Upper problem failed") - break; + new_point = solve_or_shuffle!(upper_model, u, cuts_point) + push!(cuts_point, new_point) + if value(θ) > bound + bound = value(θ) end # run inner problem - set_binary_variables!(inner_model, upper_2_inner, cuts_point[i+1]) + set_binary_variables!(inner_model, u_inner, cuts_point[i+1]) JuMP.optimize!(inner_model) # Add cut to the lists @@ -207,11 +211,11 @@ function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_ite end # test convergence - u_bound = minimum(cuts_intercept) + u_bound = cuts_intercept[i+1] upper_bound[i] = u_bound lower_bound[i] = bound gap[i] = abs(bound - u_bound) / u_bound - if i > 10 && gap[i] < 0.1 && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) + if i > 10 && gap[i] < atol && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) println("Converged") break; else From 5d119f84f6553a76426647ec59665aef38319115 Mon Sep 17 
00:00:00 2001 From: Andrew Rosemberg Date: Thu, 4 Jan 2024 16:52:54 -0500 Subject: [PATCH 13/74] update working --- examples/powermodels/unitcommitment.jl | 2 +- src/cutting_planes.jl | 20 +++++++++++++++----- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl index 81de4ef..1528476 100644 --- a/examples/powermodels/unitcommitment.jl +++ b/examples/powermodels/unitcommitment.jl @@ -201,7 +201,7 @@ import UnitCommitment: instance = UnitCommitment.read_benchmark( "matpower/case14/2017-01-01", ) -# instance.time = 4 +instance.time = 15 inner_solver = () -> POI.Optimizer(HiGHS.Optimizer()) upper_solver = HiGHS.Optimizer diff --git a/src/cutting_planes.jl b/src/cutting_planes.jl index ad579fa..7f32edb 100644 --- a/src/cutting_planes.jl +++ b/src/cutting_planes.jl @@ -151,12 +151,22 @@ function add_cut!(upper_model::Model, cut_intercept, cut_slope, cut_point, u) ) end -function solve_or_shuffle!(model::Model, u, cuts_point) +function solve_or_shuffle!(model::Model, u, cuts_point, iteration) JuMP.optimize!(model) - new_point = rand([0;1],length(u)) - if termination_status(model) != MOI.OPTIMAL || isapprox(new_point, cuts_point[end]) + + if termination_status(model) != MOI.OPTIMAL return rand([0;1],length(u)) end + + new_point = round.(Int, value.(u)) + if iteration > 1000 || iteration < 11 + return new_point + end + + if all(isapprox(new_point, cuts_point[j]) for j in iteration-10:iteration-1) + return rand([0;1],length(u)) + end + return new_point end @@ -191,7 +201,7 @@ function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_ite add_cut!(upper_model, cuts_intercept[i], cuts_slope[i], cuts_point[i], u) # Add point to the lists - new_point = solve_or_shuffle!(upper_model, u, cuts_point) + new_point = solve_or_shuffle!(upper_model, u, cuts_point, i) push!(cuts_point, new_point) if value(θ) > bound bound = value(θ) @@ -215,7 +225,7 @@ function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_ite upper_bound[i] = u_bound lower_bound[i] = bound gap[i] = abs(bound - u_bound) / u_bound - if i > 10 && gap[i] < atol && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) + if i > 10 && gap[i] < atol # && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) println("Converged") break; else From fbf32215ec3344b3d2e50c0287ef053266f9d09b Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Tue, 9 Jan 2024 17:55:25 -0500 Subject: [PATCH 14/74] update code --- examples/powermodels/power_flow_dataset.jl | 63 +++-- examples/powermodels/unitcommitment.jl | 311 ++++++++------------- src/cutting_planes.jl | 38 +-- src/nn_expression.jl | 2 +- test/nn_expression.jl | 30 ++ test/runtests.jl | 3 + 6 files changed, 205 insertions(+), 242 deletions(-) create mode 100644 test/nn_expression.jl diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl index 042f529..977c5c7 100644 --- a/examples/powermodels/power_flow_dataset.jl +++ b/examples/powermodels/power_flow_dataset.jl @@ -38,13 +38,14 @@ plt = scatter(a_diff, [i[1] for i in f_owms_val], label="p_fr", xlabel="θ_fr - scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr") optimiser=Flux.Optimise.Adam() # Define Model -# model = MultitargetNeuralNetworkRegressor(; -# builder=FullyConnectedBuilder([32, 64]), -# rng=123, -# epochs=10, -# optimiser=optimiser, -# acceleration=CUDALibs(), -# ) +model = MultitargetNeuralNetworkRegressor(; + 
builder=FullyConnectedBuilder([64, 32, 8]), + rng=123, + epochs=100, + optimiser=optimiser, + acceleration=CUDALibs(), + batch_size=32, +) # Define the machine _vm_fr, _vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"], num_samples), rand(t_bus["vmin"]:0.0001:t_bus["vmax"], num_samples) @@ -52,32 +53,32 @@ _va_fr, _va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), ra X = [_vm_fr _vm_to _va_fr _va_to] y = [i[1] for i in f_owms_val][:,:] -# mach = machine(model, X, y) -# fit!(mach; verbosity=2) +mach = machine(model, X, y) +fit!(mach; verbosity=2) # Make predictions -# predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) - -# scatter!(plt, a_diff, predictions[:,1], label="p_fr_pred"); -# scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") - -loss = Flux.mse -model = FullyConnected(4, [3], 2) -opt_state = Flux.setup(optimiser, model) -best_model = model -best_loss = 1000000 -for ep in 1:10 - epochloss = L2O.train!(model, loss, opt_state, X', y') - if ep % 100 == 0 - @info("Epoch $ep, loss = $epochloss") - if epochloss < best_loss - best_loss = epochloss - best_model = deepcopy(model) - else - model = deepcopy(best_model) - end - end -end +predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) + +scatter!(plt, a_diff, predictions[:,1], label="p_fr_pred"); +scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") + +# loss = Flux.mse +# model = FullyConnected(4, [3], 2) +# opt_state = Flux.setup(optimiser, model) +# best_model = model +# best_loss = 1000000 +# for ep in 1:10 +# epochloss = L2O.train!(model, loss, opt_state, X', y') +# if ep % 100 == 0 +# @info("Epoch $ep, loss = $epochloss") +# if epochloss < best_loss +# best_loss = epochloss +# best_model = deepcopy(model) +# else +# model = deepcopy(best_model) +# end +# end +# end function function_ohms_yt_from(::Dict) diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl index 1528476..a5d98d7 100644 --- a/examples/powermodels/unitcommitment.jl +++ b/examples/powermodels/unitcommitment.jl @@ -1,195 +1,16 @@ -using TestEnv -TestEnv.activate() - -using PowerModels -using PGLib +using LinearAlgebra +using MLJFlux using Gurobi -using Ipopt +using Flux +using MLJ +using L2O using JuMP -using Juniper -using ParametricOptInterface -const POI = ParametricOptInterface -using LinearAlgebra +using CUDA using Logging - -# "pglib_opf_case118_ieee.m" #"pglib_opf_case5_pjm.m" # "300_ieee" -matpower_case_name = "300_ieee" - -network_data = make_basic_network(pglib(matpower_case_name)) - -network_formulation = DCPPowerModel # DCPPowerModel ACPPowerModel - -unique([g["pmin"] for g in values(network_data["gen"])]) - -########################################## -## Branch and Bound -########################################## - -ipopt = optimizer_with_attributes(Ipopt.Optimizer, "print_level"=>0) -# optimizer = optimizer_with_attributes(Juniper.Optimizer, "nl_solver"=>ipopt) - -gurobi_optimizer = Gurobi.Optimizer -jump_model = Model(gurobi_optimizer) - -# Create commit variables -@variable(jump_model, u[i in 1:length(network_data["gen"])] , Bin) - -# Create power flow constraints -pm = PowerModels.instantiate_model(network_data, network_formulation, PowerModels.build_opf; jump_model=jump_model) - -# link generation and commit -pg = PowerModels.var(pm, :pg) -@constraint(jump_model, - pmin[i in 1:length(network_data["gen"])], pg[i] >= u[i] * network_data["gen"]["$i"]["pmax"] * 0.4 -) - -@constraint(jump_model, - pmax[i in 
1:length(network_data["gen"])], pg[i] <= u[i] * network_data["gen"]["$i"]["pmax"] -) - -JuMP.optimize!(jump_model) - -termination_status(jump_model) -objective_value(jump_model) - -########################################## -## Cutting Planes -########################################## - -# Set the lower problem - -# inner problem -poi_ipopt = () -> POI.Optimizer(Ipopt.Optimizer()) -# set_optimizer_attribute(poi_ipopt, "print_level", 0) -inner_model = Model(poi_ipopt) - -# Create commit parameters -start_values = zeros(length(network_data["gen"])) -@variable(inner_model, u_inner[i= 1:length(network_data["gen"])] in MOI.Parameter.(start_values)) - -# slack variables -@variable(inner_model, slack[i= 1:length(network_data["gen"])] >= 0) - -# Create power flow constraints -pm = PowerModels.instantiate_model(network_data, network_formulation, PowerModels.build_opf; jump_model=inner_model) - -# link generation and commit -pg = PowerModels.var(pm, :pg) -@constraint(inner_model, - pmin[i in 1:length(network_data["gen"])], pg[i] + slack[i] >= u_inner[i] * network_data["gen"]["$i"]["pmax"] * 0.4 -) - -@constraint(inner_model, - pmax[i in 1:length(network_data["gen"])], pg[i] - slack[i] <= u_inner[i] * network_data["gen"]["$i"]["pmax"] -) - -obj = objective_function(inner_model) -obj = obj + sum(slack) * 1e7 -set_objective(inner_model, MIN_SENSE, obj) - -JuMP.optimize!(inner_model) - -termination_status(inner_model) -primal_status(inner_model) -obj_value = objective_value(inner_model) -duals = [MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner] - -# # cut list -# cuts_intercept = [obj_value] -# cuts_slope = [duals] -# cuts_point = [start_values] - -# gurobi_optimizer = optimizer_with_attributes(Gurobi.Optimizer, "OutputFlag" => 0) -# upper_model = Model(gurobi_optimizer) -# # Create commit variables -# @variable(upper_model, u[i in 1:length(network_data["gen"])] , Bin) - -# # cutting planes epigraph variable -# bound = -1e7 -# @variable(upper_model, θ >= bound) - -# # minimize the epigraph variable -# @objective(upper_model, Min, θ) - -# max_iter = 1000 -# i = 1 -# gap = Array{Float64}(undef, max_iter) -# upper_bound = Array{Float64}(undef, max_iter) -# lower_bound = Array{Float64}(undef, max_iter) -# while i <= max_iter -# # Add cuts -# @constraint(upper_model, -# θ >= cuts_intercept[i] + dot(cuts_slope[i], u .- cuts_point[i]) -# ) - -# JuMP.optimize!(upper_model) - -# # Add point to the lists -# if termination_status(upper_model) == MOI.OPTIMAL -# push!(cuts_point, value.(u)) -# bound = objective_value(upper_model) -# else -# println("Upper problem failed") -# break; -# end - -# # run inner problem -# MOI.set.(inner_model, POI.ParameterValue(), u_inner, cuts_point[i+1]) -# JuMP.optimize!(inner_model) - -# # Add cut to the lists -# if termination_status(inner_model) == MOI.OPTIMAL || termination_status(inner_model) == MOI.LOCALLY_SOLVED -# push!(cuts_intercept, objective_value(inner_model)) -# push!(cuts_slope, [MOI.get(inner_model, POI.ParameterDual(), u_i) for u_i in u_inner]) -# else -# println("Inner problem failed") -# break; -# end - -# # test convergence -# u_bound = minimum(cuts_intercept) -# upper_bound[i] = u_bound -# lower_bound[i] = bound -# gap[i] = abs(bound - u_bound) / u_bound -# if i > 10 && gap[i] < 0.1 && all([all(cuts_point[i] .== cuts_point[j]) for j in i-10:i-1]) -# println("Converged") -# break; -# else -# @info "Iteration $i" bound cuts_intercept[i] -# end -# i += 1 -# end -# i = ifelse(i >= max_iter, max_iter, i) - -gap = gap[1:i] -upper_bound = 
upper_bound[1:i] -lower_bound = lower_bound[1:i] - -using Plots - -# Plot upper and lower bounds -plt = plot(2:i, upper_bound[2:i], label="Upper bound", title="Case $matpower_case_name", xlabel="Iteration", ylabel="Objective value"); -plot!(plt, 2:i, lower_bound[2:i], label="Lower bound") - - -########### - -# function add_deficit!(model; penalty=1e7) -# @variable(model, deficit[1:length(model[:eq_power_balance])]) -# @variable(model, norm_deficit) -# for (i, eq) in model[:eq_power_balance] -# set_normalized_coefficient(eq, deficit[i], 1) -# end -# @constraint(model, [norm_deficit; deficit] in MOI.NormOneCone(1 + length(deficit))) -# set_objective_coefficient(model, norm_deficit, penalty) -# return norm_deficit -# end - -using HiGHS +using HiGHS, Gurobi using JuMP using UnitCommitment import ParametricOptInterface as POI -using LinearAlgebra import UnitCommitment: Formulation, @@ -199,19 +20,127 @@ import UnitCommitment: # Read benchmark instance instance = UnitCommitment.read_benchmark( - "matpower/case14/2017-01-01", + "matpower/case300/2017-01-01", ) instance.time = 15 -inner_solver = () -> POI.Optimizer(HiGHS.Optimizer()) -upper_solver = HiGHS.Optimizer +inner_solver = () -> POI.Optimizer(Gurobi.Optimizer()) +upper_solver = Gurobi.Optimizer # Construct model (using state-of-the-art defaults) model = UnitCommitment.build_model( instance = instance, - optimizer = inner_solver, + optimizer = upper_solver, ) +set_optimizer_attribute(model, "PoolSearchMode", 2) +set_optimizer_attribute(model, "PoolSolutions", 100) + +bin_vars = [val for val in values(model[:is_on])] +num_bin_var = length(bin_vars) +num_all_var = num_variables(model) +global my_storage_vars = [] +global my_storage_obj = [] +function my_callback_function(cb_data, cb_where::Cint) + # You can select where the callback is run + if cb_where == GRB_CB_MIPNODE + # resultP = Vector{Cdouble}(undef, num_all_var) + # GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_REL, resultP) + # push!(my_storage_vars, resultP) + return + end + if cb_where == GRB_CB_MIPSOL || cb_where == GRB_CB_MIPNODE + # Before querying `callback_value`, you must call: + Gurobi.load_callback_variable_primal(cb_data, cb_where) + # Get the values of the variables + x = [callback_value(cb_data, var) for var in bin_vars] + # push + push!(my_storage_vars, x) + # Get the objective value + obj = Ref{Cdouble}() + GRBcbget(cb_data, cb_where, GRB_CB_MIPSOL_OBJ, obj) + # push + push!(my_storage_obj, obj[]) + return + end + return +end +MOI.set(model, Gurobi.CallbackFunction(), my_callback_function) + +# JuMP.optimize!(model) +UnitCommitment.optimize!(model) + +true_ob_value = objective_value(model) +true_sol = value.(bin_vars) # Solve model using cutting plane algorithm include("src/cutting_planes.jl") -upper_model, lower_bound, upper_bound, gap = cutting_planes!(model; upper_solver, inner_solver) \ No newline at end of file +# upper_model, lower_bound, upper_bound, gap = cutting_planes!(model; upper_solver, inner_solver) + +optimiser=Flux.Optimise.Adam() +nn = MultitargetNeuralNetworkRegressor(; + builder=FullyConnectedBuilder([8, 8, 8]), + rng=123, + epochs=100, + optimiser=optimiser, + acceleration=CUDALibs(), + batch_size=32, +) + +# Define the machine +X = hcat(my_storage_vars...)'[:,:] +y = convert.(Float64, my_storage_obj[:,:]) + +# constrols + +clear() = begin + global losses = [] + global training_losses = [] + global epochs = [] + return nothing +end + +update_loss(loss) = push!(losses, loss) +update_training_loss(report) = + push!(training_losses, + 
report.training_losses[end]) +update_epochs(epoch) = push!(epochs, epoch) + +controls=[Step(1), + NumberSinceBest(6), + InvalidValue(), + TimeLimit(1/60), + WithLossDo(update_loss), + WithReportDo(update_training_loss), + WithIterationsDo(update_epochs) +] + +iterated_pipe = + IteratedModel(model=nn, + controls=controls, + resampling=Holdout(fraction_train=0.8), + measure = l2 +) + +clear() +mach = machine(iterated_pipe, X, y) +fit!(mach; verbosity=2) + +# Make predictions +predictions = convert.(Float64, predict(mach, X)) +error = abs.(y .- predictions) ./ y +mean(error) + +# Get upper model +upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(model) + +# add nn to model +bin_vars_upper = [inner_2_upper_map[var] for var in values(model[:is_on])] +obj = mach.fitresult.fitresult[1](bin_vars_upper) + +aux = @variable(upper_model) +@constraint(upper_model, obj[1] <= aux) +@constraint(upper_model, aux >= 0.0) +@objective(upper_model, Min, aux) + +set_optimizer(upper_model, upper_solver) +JuMP.optimize!(upper_model) \ No newline at end of file diff --git a/src/cutting_planes.jl b/src/cutting_planes.jl index 7f32edb..29c0125 100644 --- a/src/cutting_planes.jl +++ b/src/cutting_planes.jl @@ -1,11 +1,11 @@ using JuMP """ - all_binary_variables(m::Model) + all_binary_variables(m::JuMP.Model) Return a list of all binary variables in the model `m`. """ -function all_binary_variables(m::Model) +function all_binary_variables(m::JuMP.Model) return all_variables(m)[is_binary.(all_variables(m))] end @@ -19,12 +19,12 @@ function variables(con::ConstraintRef) end """ - all_binary_constraints(m::Model) + all_binary_constraints(m::JuMP.Model) Return a list of all constraints in the model `m` containing only binary variables. """ -function all_binary_constraints(m::Model) +function all_binary_constraints(m::JuMP.Model) all_cons_types = list_of_constraint_types(m) consrefs = [] for con_type in all_cons_types @@ -39,20 +39,20 @@ function all_binary_constraints(m::Model) end # Copy variables -function copy_binary_variables(m_to::Model, m_from::Model) +function copy_binary_variables(m_to::JuMP.Model, m_from::JuMP.Model) mapping = Dict() for var in all_binary_variables(m_from) - ref_to = @variable(m_to, base_name = name(var), binary = true) + ref_to = @variable(m_to, base_name = JuMP.name(var), binary = true) mapping[var] = ref_to end return mapping end # Copy constraints -function copy_binary_constraint(::Model, ::VariableRef, set, var_mapping) +function copy_binary_constraint(::JuMP.Model, ::VariableRef, set, var_mapping) return nothing end -function copy_binary_constraint(m_to::Model, func::AffExpr, set, var_mapping) +function copy_binary_constraint(m_to::JuMP.Model, func::AffExpr, set, var_mapping) terms_dict = JuMP.OrderedCollections.OrderedDict{VariableRef, Float64}() for var in keys(func.terms) terms_dict[var_mapping[var]] = func.terms[var] @@ -61,7 +61,7 @@ function copy_binary_constraint(m_to::Model, func::AffExpr, set, var_mapping) return @constraint(m_to, exp in set) end -function copy_binary_constraints(m_to::Model, m_from::Model, var_mapping::Dict) +function copy_binary_constraints(m_to::JuMP.Model, m_from::JuMP.Model, var_mapping::Dict) cons_to = [] for con in all_binary_constraints(m_from) con_exp = constraint_object(con) @@ -72,7 +72,7 @@ function copy_binary_constraints(m_to::Model, m_from::Model, var_mapping::Dict) end # Copy objective function -function copy_binary_objective(m_to::Model, m_from::Model, var_mapping::Dict) +function copy_binary_objective(m_to::JuMP.Model, 
m_from::JuMP.Model, var_mapping::Dict) obj = objective_function(m_from) obj2_dict = JuMP.OrderedCollections.OrderedDict{VariableRef, Float64}() for var in keys(obj.terms) @@ -85,8 +85,8 @@ function copy_binary_objective(m_to::Model, m_from::Model, var_mapping::Dict) end # Copy binary model -function copy_binary_model(m_from::Model) - m_to = Model() +function copy_binary_model(m_from::JuMP.Model) + m_to = JuMP.Model() var_mapping = copy_binary_variables(m_to, m_from) cons_mapping = copy_binary_constraints(m_to, m_from, var_mapping) copy_binary_objective(m_to, m_from, var_mapping) @@ -94,7 +94,7 @@ function copy_binary_model(m_from::Model) end # remove binary terms -function delete_binary_terms!(m::Model) +function delete_binary_terms!(m::JuMP.Model) obj = objective_function(m) for var in keys(obj.terms) if is_binary(var) @@ -111,7 +111,7 @@ function delete_binary_terms!(m::Model) end end -function add_deficit_constraints!(model::Model; penalty=1e7) +function add_deficit_constraints!(model::JuMP.Model; penalty=1e7) consrefs = [con for con in all_constraints(model, include_variable_in_set_constraints=false)] @variable(model, deficit[1:length(consrefs)]) @variable(model, norm_deficit) @@ -124,7 +124,7 @@ function add_deficit_constraints!(model::Model; penalty=1e7) end # fix binary variables to POI parameters -function fix_binary_variables!(inner_model::Model, inner_2_upper_map::Dict) +function fix_binary_variables!(inner_model::JuMP.Model, inner_2_upper_map::Dict) var_mapping = Dict() for (to_var, from_var) in inner_2_upper_map param = @variable(inner_model, set = MOI.Parameter(0.0)) @@ -138,20 +138,20 @@ function fix_binary_variables!(inner_model::Model, inner_2_upper_map::Dict) end # set binary variables -function set_binary_variables!(inner_model::Model, u_inner, vals) +function set_binary_variables!(inner_model::JuMP.Model, u_inner, vals) for (i, to_var) in enumerate(u_inner) MOI.set(inner_model, POI.ParameterValue(), to_var, vals[i]) end end # add cut -function add_cut!(upper_model::Model, cut_intercept, cut_slope, cut_point, u) +function add_cut!(upper_model::JuMP.Model, cut_intercept, cut_slope, cut_point, u) @constraint(upper_model, upper_model[:θ] >= cut_intercept + dot(cut_slope, u .- cut_point) ) end -function solve_or_shuffle!(model::Model, u, cuts_point, iteration) +function solve_or_shuffle!(model::JuMP.Model, u, cuts_point, iteration) JuMP.optimize!(model) if termination_status(model) != MOI.OPTIMAL @@ -170,7 +170,7 @@ function solve_or_shuffle!(model::Model, u, cuts_point, iteration) return new_point end -function cutting_planes!(inner_model::Model; upper_solver, inner_solver, max_iter::Int=4000, atol::Real=0.1, bound::Real=0.0) +function cutting_planes!(inner_model::JuMP.Model; upper_solver, inner_solver, max_iter::Int=4000, atol::Real=0.1, bound::Real=0.0) upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) delete_binary_terms!(inner_model) add_deficit_constraints!(inner_model) diff --git a/src/nn_expression.jl b/src/nn_expression.jl index 0ec8869..df5680b 100644 --- a/src/nn_expression.jl +++ b/src/nn_expression.jl @@ -2,7 +2,7 @@ using JuMP import NNlib: relu -big_M = 1000.0 +big_M = 1e10 function NNlib.relu(ex::AffExpr) model = owner_model(ex) diff --git a/test/nn_expression.jl b/test/nn_expression.jl new file mode 100644 index 0000000..879cbbb --- /dev/null +++ b/test/nn_expression.jl @@ -0,0 +1,30 @@ +""" + test_flux_jump() + +Tests running a jump model with a flux expression. 
+""" +function test_flux_jump() + for i in 1:10 + model = JuMP.Model(HiGHS.Optimizer) + + @variable(model, x[i = 1:3]>= 2.3) + + flux_model = Chain(Dense(3, 3, relu), Dense(3, 1)) + + ex = flux_model(x)[1] + + # @constraint(model, ex >= -100.0) + @constraint(model, sum(x) <= 10) + + @objective(model, Min, ex) + + JuMP.optimize!(model) + + @test termination_status(model) === OPTIMAL + if flux_model(value.(x))[1] <= 1.0 + @test isapprox(flux_model(value.(x))[1], value(ex); atol=0.01) + else + @test isapprox(flux_model(value.(x))[1], value(ex); rtol=0.001) + end + end +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 3b70dcf..4a46497 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -27,8 +27,11 @@ include(joinpath(examples_dir, "powermodels", "pglib_datagen.jl")) include(joinpath(test_dir, "test_flux_forecaster.jl")) +include(joinpath(test_dir, "nn_expression.jl")) + @testset "L2O.jl" begin mktempdir() do path + test_flux_jump() test_problem_iterator(path) test_worst_case_problem_iterator(path) file_in, file_out = test_pglib_datasetgen(path, "pglib_opf_case5_pjm", 20) From 32f3cd9d81245dd901beb187fa3ac04cd7381ab8 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Wed, 10 Jan 2024 14:47:25 -0500 Subject: [PATCH 15/74] rm unwanted code --- examples/powermodels/power_flow_dataset.jl | 100 --------------- examples/powermodels/powermodels.jl | 141 --------------------- src/nn_expression.jl | 47 ------- 3 files changed, 288 deletions(-) delete mode 100644 examples/powermodels/power_flow_dataset.jl delete mode 100644 examples/powermodels/powermodels.jl diff --git a/examples/powermodels/power_flow_dataset.jl b/examples/powermodels/power_flow_dataset.jl deleted file mode 100644 index 977c5c7..0000000 --- a/examples/powermodels/power_flow_dataset.jl +++ /dev/null @@ -1,100 +0,0 @@ -using TestEnv -TestEnv.activate() - -using PowerModels -using PGLib -using MLJFlux -using Gurobi -using Flux -using MLJ -using L2O -using JuMP -using CUDA -using Logging - -include("examples/powermodels/powermodels.jl") - -matpower_case_name = "pglib_opf_case5_pjm" - -network_data = make_basic_network(pglib(matpower_case_name)) - -branch = network_data["branch"]["2"] -f_bus_index = branch["f_bus"] -f_bus = network_data["bus"]["$f_bus_index"] -t_bus_index = branch["t_bus"] -t_bus = network_data["bus"]["$t_bus_index"] - -f_owms = function_ohms_yt_from(branch) - -num_samples = 1000 - -vm_fr, vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"]), rand(t_bus["vmin"]:0.0001:t_bus["vmax"]) -va_fr, va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), rand(branch["angmin"]:0.0001:branch["angmax"], num_samples) -a_diff = va_fr - va_to - -using Plots -f_owms_val = f_owms.(vm_fr, vm_to, va_fr, va_to) -plt = scatter(a_diff, [i[1] for i in f_owms_val], label="p_fr", xlabel="θ_fr - θ_to", ylabel="flow", legend=:outertopright); -scatter!(plt, a_diff, [i[2] for i in f_owms_val], label="q_fr") -optimiser=Flux.Optimise.Adam() -# Define Model -model = MultitargetNeuralNetworkRegressor(; - builder=FullyConnectedBuilder([64, 32, 8]), - rng=123, - epochs=100, - optimiser=optimiser, - acceleration=CUDALibs(), - batch_size=32, -) - -# Define the machine -_vm_fr, _vm_to = rand(f_bus["vmin"]:0.0001:f_bus["vmax"], num_samples), rand(t_bus["vmin"]:0.0001:t_bus["vmax"], num_samples) -_va_fr, _va_to = rand(branch["angmin"]:0.0001:branch["angmax"], num_samples), rand(branch["angmin"]:0.0001:branch["angmax"], num_samples) -X = [_vm_fr _vm_to _va_fr _va_to] -y = [i[1] for i in 
f_owms_val][:,:] - -mach = machine(model, X, y) -fit!(mach; verbosity=2) - -# Make predictions -predictions = predict(mach, [fill(vm_fr, num_samples) fill(vm_to, num_samples) va_fr va_to]) - -scatter!(plt, a_diff, predictions[:,1], label="p_fr_pred"); -scatter!(plt, a_diff, predictions[:,2], label="q_fr_pred") - -# loss = Flux.mse -# model = FullyConnected(4, [3], 2) -# opt_state = Flux.setup(optimiser, model) -# best_model = model -# best_loss = 1000000 -# for ep in 1:10 -# epochloss = L2O.train!(model, loss, opt_state, X', y') -# if ep % 100 == 0 -# @info("Epoch $ep, loss = $epochloss") -# if epochloss < best_loss -# best_loss = epochloss -# best_model = deepcopy(model) -# else -# model = deepcopy(best_model) -# end -# end -# end - - -function function_ohms_yt_from(::Dict) - return (vm_fr, vm_to, va_fr, va_to) -> model([vm_fr, vm_to, va_fr, va_to]) -end - -function function_ohms_yt_to(branch::Dict) - return (vm_fr, vm_to, va_fr, va_to) -> model([vm_fr, vm_to, va_fr, va_to]) -end - -pm = instantiate_model( - network_data, - ACPPowerModel, - PowerModels.build_opf; - setting=Dict("output" => Dict("branch_flows" => true, "duals" => true)), -) - -# solve -result = optimize_model!(pm, optimizer=Gurobi.Optimizer) \ No newline at end of file diff --git a/examples/powermodels/powermodels.jl b/examples/powermodels/powermodels.jl deleted file mode 100644 index a221dca..0000000 --- a/examples/powermodels/powermodels.jl +++ /dev/null @@ -1,141 +0,0 @@ -import PowerModels: constraint_ohms_yt_from, constraint_ohms_yt_to, constraint_ohms_y_from, constraint_ohms_y_to - -function _function_ohms_yt_from(branch::Dict) - g, b = calc_branch_y(branch) - tr, ti = calc_branch_t(branch) - g_fr = branch["g_fr"] - b_fr = branch["b_fr"] - tm = branch["tap"] - - return (vm_fr, vm_to, va_fr, va_to) -> ((g+g_fr)/tm^2*vm_fr^2 + (-g*tr+b*ti)/tm^2*(vm_fr*vm_to*cos(va_fr-va_to)) + (-b*tr-g*ti)/tm^2*(vm_fr*vm_to*sin(va_fr-va_to)), # from - -(b+b_fr)/tm^2*vm_fr^2 - (-b*tr-g*ti)/tm^2*(vm_fr*vm_to*cos(va_fr-va_to)) + (-g*tr+b*ti)/tm^2*(vm_fr*vm_to*sin(va_fr-va_to))) # to - -end - -function function_ohms_yt_from(branch::Dict) - _function_ohms_yt_from(branch) -end - -function PowerModels.constraint_ohms_yt_from(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) - branch = ref(pm, nw, :branch, i) - f_bus = branch["f_bus"] - t_bus = branch["t_bus"] - f_idx = (i, f_bus, t_bus) - - p_fr = var(pm, nw, :p, f_idx) - q_fr = var(pm, nw, :q, f_idx) - vm_fr = var(pm, nw, :vm, f_bus) - vm_to = var(pm, nw, :vm, t_bus) - va_fr = var(pm, nw, :va, f_bus) - va_to = var(pm, nw, :va, t_bus) - - f_owms = function_ohms_yt_from(branch) - f_owms_p, f_owms_q = f_owms(vm_fr, vm_to, va_fr, va_to) - - #constraint_ohms_yt_from(pm, nw, f_bus, t_bus, f_idx, t_idx, g, b, g_fr, b_fr, tr, ti, tm) - JuMP.@constraint(pm.model, p_fr == f_owms_p) # @NL - JuMP.@constraint(pm.model, q_fr == f_owms_q) # @NL -end - -function _function_ohms_yt_to(branch::Dict) - g, b = calc_branch_y(branch) - tr, ti = calc_branch_t(branch) - g_to = branch["g_to"] - b_to = branch["b_to"] - tm = branch["tap"] - - return (vm_fr, vm_to, va_fr, va_to) -> ((g+g_to)*vm_to^2 + (-g*tr-b*ti)/tm^2*(vm_to*vm_fr*cos(va_to-va_fr)) + (-b*tr+g*ti)/tm^2*(vm_to*vm_fr*sin(va_to-va_fr)), # from - -(b+b_to)*vm_to^2 - (-b*tr+g*ti)/tm^2*(vm_to*vm_fr*cos(va_to-va_fr)) + (-g*tr-b*ti)/tm^2*(vm_to*vm_fr*sin(va_to-va_fr))) # to - -end - -function function_ohms_yt_to(branch::Dict) - _function_ohms_yt_to(branch) -end - -function PowerModels.constraint_ohms_yt_to(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) 
- branch = ref(pm, nw, :branch, i) - t_bus = branch["t_bus"] - f_bus = branch["f_bus"] - t_idx = (i, t_bus, f_bus) - - p_to = var(pm, nw, :p, t_idx) - q_to = var(pm, nw, :q, t_idx) - vm_fr = var(pm, nw, :vm, f_bus) - vm_to = var(pm, nw, :vm, t_bus) - va_fr = var(pm, nw, :va, f_bus) - va_to = var(pm, nw, :va, t_bus) - - f_owms = function_ohms_yt_to(branch) - f_owms_p, f_owms_q = f_owms(vm_fr, vm_to, va_fr, va_to) - - # constraint_ohms_yt_to(pm, nw, f_bus, t_bus, f_idx, t_idx, g, b, g_to, b_to, tr, ti, tm) - JuMP.@constraint(pm.model, p_to == f_owms_p) # @NL - JuMP.@constraint(pm.model, q_to == f_owms_q) # @NL -end - -# function function_ohms_y_from(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) -# branch = ref(pm, nw, :branch, i) -# f_bus = branch["f_bus"] -# t_bus = branch["t_bus"] -# f_idx = (i, f_bus, t_bus) -# t_idx = (i, t_bus, f_bus) - -# g, b = calc_branch_y(branch) -# g_fr = branch["g_fr"] -# b_fr = branch["b_fr"] -# tm = branch["tap"] -# ta = branch["shift"] - -# return (vm_fr, vm_to, va_fr, va_to) -> (g+g_fr)*(vm_fr/tm)^2 - g*vm_fr/tm*vm_to*cos(va_fr-va_to-ta) + -b*vm_fr/tm*vm_to*sin(va_fr-va_to-ta), # from -# (vm_fr, vm_to, va_fr, va_to) -> -(b+b_fr)*(vm_fr/tm)^2 + b*vm_fr/tm*vm_to*cos(va_fr-va_to-ta) + -g*vm_fr/tm*vm_to*sin(va_fr-va_to-ta) # to - -# end - -# function constraint_ohms_y_from(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) -# p_fr = var(pm, nw, :p, f_idx) -# q_fr = var(pm, nw, :q, f_idx) -# vm_fr = var(pm, nw, :vm, f_bus) -# vm_to = var(pm, nw, :vm, t_bus) -# va_fr = var(pm, nw, :va, f_bus) -# va_to = var(pm, nw, :va, t_bus) - -# f_owms = function_ohms_y_from(pm, i; nw=nw) - -# # constraint_ohms_y_from(pm, nw, f_bus, t_bus, f_idx, t_idx, g, b, g_fr, b_fr, tm, ta) -# JuMP.constraint(pm.model, p_fr == f_owms[1] # @NL(vm_fr, vm_to, va_fr, va_to)) -# JuMP.constraint(pm.model, q_fr == f_owms[2] # @NL(vm_fr, vm_to, va_fr, va_to)) -# end - -# function function_ohms_y_to(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) -# branch = ref(pm, nw, :branch, i) -# f_bus = branch["f_bus"] -# t_bus = branch["t_bus"] -# f_idx = (i, f_bus, t_bus) -# t_idx = (i, t_bus, f_bus) - -# g, b = calc_branch_y(branch) -# g_to = branch["g_to"] -# b_to = branch["b_to"] -# tm = branch["tap"] -# ta = branch["shift"] - -# return (vm_fr, vm_to, va_fr, va_to) -> (g+g_to)*vm_to^2 - g*vm_to*vm_fr/tm*cos(va_to-va_fr+ta) + -b*vm_to*vm_fr/tm*sin(va_to-va_fr+ta), # from -# (vm_fr, vm_to, va_fr, va_to) -> -(b+b_to)*vm_to^2 + b*vm_to*vm_fr/tm*cos(va_to-va_fr+ta) + -g*vm_to*vm_fr/tm*sin(va_to-va_fr+ta) # to - -# end - -# function constraint_ohms_y_to(pm::AbstractACPModel, i::Int; nw::Int=nw_id_default) -# p_to = var(pm, nw, :p, t_idx) -# q_to = var(pm, nw, :q, t_idx) -# vm_fr = var(pm, nw, :vm, f_bus) -# vm_to = var(pm, nw, :vm, t_bus) -# va_fr = var(pm, nw, :va, f_bus) -# va_to = var(pm, nw, :va, t_bus) - -# f_owms = function_ohms_y_to(pm, i; nw=nw) - -# # constraint_ohms_y_to(pm, nw, f_bus, t_bus, f_idx, t_idx, g, b, g_to, b_to, tm, ta) -# JuMP.constraint(pm.model, p_to == f_owms[1] # @NL(vm_fr, vm_to, va_fr, va_to)) -# JuMP.constraint(pm.model, q_to == f_owms[2] # @NL(vm_fr, vm_to, va_fr, va_to)) -# end diff --git a/src/nn_expression.jl b/src/nn_expression.jl index df5680b..48f2da8 100644 --- a/src/nn_expression.jl +++ b/src/nn_expression.jl @@ -14,51 +14,4 @@ function NNlib.relu(ex::AffExpr) return relu_out end -# function build_test_nlp_model() -# model = Model() - -# @variable(model, x[i = 1:2]); - -# @variable(model, y[i = 1:2] >= 0.0); - -# ex1 = sin(x[1]) -# ex2 = cos(x[2]) - -# cons = 
@NLconstraint(model, ex1 == ex2) - -# @objective(model, Min, sum(x) + sum(y)) - -# return model, cons -# end - -# function constraints_nlp_evaluator(model, x) -# d = NLPEvaluator(model) -# MOI.initialize(d, [:ExprGraph]) - -# f = zeros(length(model.nlp_model.constraints)) - -# MOI.eval_constraint(d, f, x) - -# return f -# end - -# model, cons = build_test_nlp_model() - -# constraints_nlp_evaluator(model, [1.0]) - -# ################ - -# flux_model = Chain(Dense(3, 3, relu), Dense(3, 1)) - -# ex = flux_model(x)[1] - -# cons = @constraint(model, ex == 1) - - - -# optimize!(model) - -# println(value.(x)) - -# value(cons) From 8a025982d6c0c3d5a432b01f9ef9dba802341ac1 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Wed, 10 Jan 2024 16:42:57 -0500 Subject: [PATCH 16/74] working code finally --- Project.toml | 1 + examples/powermodels/unitcommitment.jl | 13 +++-- src/nn_expression.jl | 5 +- test/nn_expression.jl | 67 ++++++++++++++++++++++++-- test/runtests.jl | 2 +- 5 files changed, 79 insertions(+), 9 deletions(-) diff --git a/Project.toml b/Project.toml index cec6647..6aaa13f 100644 --- a/Project.toml +++ b/Project.toml @@ -25,6 +25,7 @@ CSV = "0.10" Dualization = "0.5" JuMP = "1" ParametricOptInterface = "0.7" +Zygote ="^0.6.68" julia = "1.6" [extras] diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl index a5d98d7..9f42a77 100644 --- a/examples/powermodels/unitcommitment.jl +++ b/examples/powermodels/unitcommitment.jl @@ -45,9 +45,9 @@ function my_callback_function(cb_data, cb_where::Cint) # resultP = Vector{Cdouble}(undef, num_all_var) # GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_REL, resultP) # push!(my_storage_vars, resultP) - return + # return end - if cb_where == GRB_CB_MIPSOL || cb_where == GRB_CB_MIPNODE + if cb_where == GRB_CB_MIPSOL # Before querying `callback_value`, you must call: Gurobi.load_callback_variable_primal(cb_data, cb_where) # Get the values of the variables @@ -143,4 +143,11 @@ aux = @variable(upper_model) @objective(upper_model, Min, aux) set_optimizer(upper_model, upper_solver) -JuMP.optimize!(upper_model) \ No newline at end of file +JuMP.optimize!(upper_model) + +termination_status(upper_model) +value.(bin_vars_upper) +objective_value(upper_model) + +mach.fitresult.fitresult[1](value.(bin_vars_upper)) +(value(obj[1]) - true_ob_value) / true_ob_value \ No newline at end of file diff --git a/src/nn_expression.jl b/src/nn_expression.jl index 48f2da8..7807508 100644 --- a/src/nn_expression.jl +++ b/src/nn_expression.jl @@ -5,11 +5,12 @@ import NNlib: relu big_M = 1e10 function NNlib.relu(ex::AffExpr) + tol = 0.00001 model = owner_model(ex) aux = @variable(model, binary = true) relu_out = @variable(model, lower_bound = 0.0) - @constraint(model, relu_out >= ex) - @constraint(model, relu_out <= ex + big_M * (1 - aux)) + @constraint(model, relu_out >= ex * (1-tol)) + @constraint(model, relu_out <= ex * (1+tol) + big_M * (1 - aux)) @constraint(model, relu_out <= big_M * aux) return relu_out end diff --git a/test/nn_expression.jl b/test/nn_expression.jl index 879cbbb..5617a52 100644 --- a/test/nn_expression.jl +++ b/test/nn_expression.jl @@ -1,9 +1,9 @@ """ - test_flux_jump() + test_flux_jump_basic() Tests running a jump model with a flux expression. 
""" -function test_flux_jump() +function test_flux_jump_basic() for i in 1:10 model = JuMP.Model(HiGHS.Optimizer) @@ -27,4 +27,65 @@ function test_flux_jump() @test isapprox(flux_model(value.(x))[1], value(ex); rtol=0.001) end end -end \ No newline at end of file +end + +""" + test_FullyConnected_jump() + +Tests running a jump model with a FullyConnected Network expression. +""" +function test_FullyConnected_jump() + for i in 1:10 + X = rand(100, 3) + Y = rand(100, 1) + + nn = MultitargetNeuralNetworkRegressor(; + builder=FullyConnectedBuilder([8, 8, 8]), + rng=123, + epochs=100, + optimiser=optimiser, + acceleration=CUDALibs(), + batch_size=32, + ) + + mach = machine(nn, X, y) + fit!(mach; verbosity=2) + + flux_model = mach.fitresult[1] + + model = JuMP.Model(Gurobi.Optimizer) + + @variable(model, x[i = 1:3]>= 2.3) + + ex = flux_model(x)[1] + + # @constraint(model, ex >= -100.0) + @constraint(model, sum(x) <= 10) + + @objective(model, Min, ex) + + JuMP.optimize!(model) + + @test termination_status(model) === OPTIMAL + if flux_model(value.(x))[1] <= 1.0 + @test isapprox(flux_model(value.(x))[1], value(ex); atol=0.01) + else + @test isapprox(flux_model(value.(x))[1], value(ex); rtol=0.001) + end + end +end + +function print_conflict!(model) + JuMP.compute_conflict!(model) + ctypes = list_of_constraint_types(model) + for (F, S) in ctypes + cons = all_constraints(model, F, S) + for i in eachindex(cons) + isassigned(cons, i) || continue + con = cons[i] + cst = MOI.get(model, MOI.ConstraintConflictStatus(), con) + cst == MOI.IN_CONFLICT && @info JuMP.name(con) con + end + end + return nothing +end diff --git a/test/runtests.jl b/test/runtests.jl index 4a46497..e1104ea 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -31,7 +31,7 @@ include(joinpath(test_dir, "nn_expression.jl")) @testset "L2O.jl" begin mktempdir() do path - test_flux_jump() + test_flux_jump_basic() test_problem_iterator(path) test_worst_case_problem_iterator(path) file_in, file_out = test_pglib_datasetgen(path, "pglib_opf_case5_pjm", 20) From 61576c744ac9b2aabbbce9ca4cc9ecf5e3e64d34 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 11 Jan 2024 09:47:51 -0500 Subject: [PATCH 17/74] relax solutions working --- examples/powermodels/unitcommitment.jl | 38 ++++++++++++++++++++------ 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl index 9f42a77..d502cc3 100644 --- a/examples/powermodels/unitcommitment.jl +++ b/examples/powermodels/unitcommitment.jl @@ -22,7 +22,7 @@ import UnitCommitment: instance = UnitCommitment.read_benchmark( "matpower/case300/2017-01-01", ) -instance.time = 15 +instance.time = 2 inner_solver = () -> POI.Optimizer(Gurobi.Optimizer()) upper_solver = Gurobi.Optimizer @@ -35,17 +35,34 @@ set_optimizer_attribute(model, "PoolSearchMode", 2) set_optimizer_attribute(model, "PoolSolutions", 100) bin_vars = [val for val in values(model[:is_on])] +obj_terms = objective_function(model).terms +obj_terms_gurobi = [obj_terms[var] for var in all_variables(model) if haskey(obj_terms, var)] +# gurobi_indexes = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[bin_vars[i].index]) for i in 1:length(bin_vars)] num_bin_var = length(bin_vars) num_all_var = num_variables(model) global my_storage_vars = [] global my_storage_obj = [] +global is_relaxed = [] +global non_optimals = [] function my_callback_function(cb_data, cb_where::Cint) # You can select where the callback is run 
if cb_where == GRB_CB_MIPNODE - # resultP = Vector{Cdouble}(undef, num_all_var) - # GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_REL, resultP) - # push!(my_storage_vars, resultP) - # return + resultobj = Ref{Cint}() + GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_STATUS, resultobj) + if resultobj[] != GRB_OPTIMAL + push!(non_optimals, resultobj[]) + return # Solution is something other than optimal. + end + gurobi_indexes_all = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[var.index]) for var in all_variables(model) if haskey(obj_terms, var)] + gurobi_indexes_bin = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[bin_vars[i].index]) for i in 1:length(bin_vars)] + resultP = Vector{Cdouble}(undef, num_all_var) + GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_REL, resultP) + push!(my_storage_vars, resultP[gurobi_indexes_bin]) + # Get the objective value + push!(my_storage_obj, dot(obj_terms_gurobi, resultP[gurobi_indexes_all])) + # mark as relaxed + push!(is_relaxed, 1) + return end if cb_where == GRB_CB_MIPSOL # Before querying `callback_value`, you must call: @@ -59,6 +76,8 @@ function my_callback_function(cb_data, cb_where::Cint) GRBcbget(cb_data, cb_where, GRB_CB_MIPSOL_OBJ, obj) # push push!(my_storage_obj, obj[]) + # mark as not relaxed + push!(is_relaxed, 0) return end return @@ -67,6 +86,7 @@ MOI.set(model, Gurobi.CallbackFunction(), my_callback_function) # JuMP.optimize!(model) UnitCommitment.optimize!(model) +is_relaxed = findall(x -> x == 1, is_relaxed) true_ob_value = objective_value(model) true_sol = value.(bin_vars) @@ -78,7 +98,7 @@ include("src/cutting_planes.jl") optimiser=Flux.Optimise.Adam() nn = MultitargetNeuralNetworkRegressor(; - builder=FullyConnectedBuilder([8, 8, 8]), + builder=FullyConnectedBuilder([64, 32]), rng=123, epochs=100, optimiser=optimiser, @@ -108,7 +128,7 @@ update_epochs(epoch) = push!(epochs, epoch) controls=[Step(1), NumberSinceBest(6), InvalidValue(), - TimeLimit(1/60), + TimeLimit(5/60), WithLossDo(update_loss), WithReportDo(update_training_loss), WithIterationsDo(update_epochs) @@ -118,7 +138,7 @@ iterated_pipe = IteratedModel(model=nn, controls=controls, resampling=Holdout(fraction_train=0.8), - measure = l2 + measure = Flux.mse ) clear() @@ -150,4 +170,4 @@ value.(bin_vars_upper) objective_value(upper_model) mach.fitresult.fitresult[1](value.(bin_vars_upper)) -(value(obj[1]) - true_ob_value) / true_ob_value \ No newline at end of file +abs(value(obj[1]) - true_ob_value) / true_ob_value \ No newline at end of file From c6f96b8017b9d98737712e71accf0ee57bddfffb Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 11 Jan 2024 10:48:57 -0500 Subject: [PATCH 18/74] update best set-up --- examples/powermodels/unitcommitment.jl | 89 +++++++++++++++++++++----- 1 file changed, 72 insertions(+), 17 deletions(-) diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl index d502cc3..c818645 100644 --- a/examples/powermodels/unitcommitment.jl +++ b/examples/powermodels/unitcommitment.jl @@ -1,3 +1,11 @@ +################################################### +############## Unit Commitment Model ############## +################################################### + +############## +# Load Packages +############## + using LinearAlgebra using MLJFlux using Gurobi @@ -18,28 +26,44 @@ import UnitCommitment: MorLatRam2013, ShiftFactorsFormulation -# Read benchmark instance +include("src/cutting_planes.jl") + +############## +# Solver 
+############## + +inner_solver = () -> POI.Optimizer(Gurobi.Optimizer()) +upper_solver = Gurobi.Optimizer + +############## +# Load Instance +############## + instance = UnitCommitment.read_benchmark( "matpower/case300/2017-01-01", ) instance.time = 2 -inner_solver = () -> POI.Optimizer(Gurobi.Optimizer()) -upper_solver = Gurobi.Optimizer # Construct model (using state-of-the-art defaults) model = UnitCommitment.build_model( instance = instance, optimizer = upper_solver, ) + +# Set solver attributes set_optimizer_attribute(model, "PoolSearchMode", 2) set_optimizer_attribute(model, "PoolSolutions", 100) -bin_vars = [val for val in values(model[:is_on])] +############## +# Solve and store solutions +############## + +bin_vars = all_binary_variables(model) obj_terms = objective_function(model).terms obj_terms_gurobi = [obj_terms[var] for var in all_variables(model) if haskey(obj_terms, var)] -# gurobi_indexes = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[bin_vars[i].index]) for i in 1:length(bin_vars)] num_bin_var = length(bin_vars) num_all_var = num_variables(model) + global my_storage_vars = [] global my_storage_obj = [] global is_relaxed = [] @@ -92,18 +116,23 @@ true_ob_value = objective_value(model) true_sol = value.(bin_vars) # Solve model using cutting plane algorithm -include("src/cutting_planes.jl") - # upper_model, lower_bound, upper_bound, gap = cutting_planes!(model; upper_solver, inner_solver) -optimiser=Flux.Optimise.Adam() +############## +# Fit DNN approximator +############## + +# optimiser=Flux.Optimise.Adam() +optimiser=ConvexRule( + Flux.Optimise.Adam(0.001, (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) +) nn = MultitargetNeuralNetworkRegressor(; - builder=FullyConnectedBuilder([64, 32]), + builder=FullyConnectedBuilder([1024, 300, 64, 32]), # [1024, 300, 64, 32] rng=123, epochs=100, optimiser=optimiser, acceleration=CUDALibs(), - batch_size=32, + batch_size=24, ) # Define the machine @@ -119,7 +148,10 @@ clear() = begin return nothing end -update_loss(loss) = push!(losses, loss) +function update_loss(loss) + @info "Loss: $loss" + push!(losses, loss) +end update_training_loss(report) = push!(training_losses, report.training_losses[end]) @@ -137,8 +169,8 @@ controls=[Step(1), iterated_pipe = IteratedModel(model=nn, controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = Flux.mse + resampling=Holdout(fraction_train=0.7), + measure = Flux.mse, ) clear() @@ -150,11 +182,22 @@ predictions = convert.(Float64, predict(mach, X)) error = abs.(y .- predictions) ./ y mean(error) +############## +# Solve using DNN approximator +############## + +function NNlib.relu(ex::AffExpr) + model = owner_model(ex) + relu_out = @variable(model, lower_bound = 0.0) + @constraint(model, relu_out >= ex) + return relu_out +end + # Get upper model upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(model) # add nn to model -bin_vars_upper = [inner_2_upper_map[var] for var in values(model[:is_on])] +bin_vars_upper = [inner_2_upper_map[var] for var in bin_vars] obj = mach.fitresult.fitresult[1](bin_vars_upper) aux = @variable(upper_model) @@ -166,8 +209,20 @@ set_optimizer(upper_model, upper_solver) JuMP.optimize!(upper_model) termination_status(upper_model) -value.(bin_vars_upper) +sol_bin_vars = round.(Int, value.(bin_vars_upper)) objective_value(upper_model) -mach.fitresult.fitresult[1](value.(bin_vars_upper)) -abs(value(obj[1]) - true_ob_value) / true_ob_value \ No newline at end of file +############## +# Compare +############## 
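One remark before the comparison: the `NNlib.relu` override a few lines up keeps only the epigraph pair `relu_out >= ex`, `relu_out >= 0` and drops the binary plus big-M rows of the PATCH 16 encoding. For a network whose ReLU outputs carry nonnegative downstream weights (presumably what the `ConvexRule` optimiser wrapper maintains during training) and whose output is minimized, the relaxation is tight, since the solver presses every `relu_out` down onto `max(0, ex)`. A toy illustration of that argument, with HiGHS assumed:

using JuMP, HiGHS

model = Model(HiGHS.Optimizer)
@variable(model, -1 <= x <= 1)
@variable(model, relu_out >= 0)
@constraint(model, relu_out >= 3x - 1)   # epigraph of relu(3x - 1)
@objective(model, Min, relu_out + x)     # nonnegative weight on relu_out
optimize!(model)
# at the optimum the relaxation binds onto the true ReLU value:
@assert isapprox(value(relu_out), max(0.0, 3 * value(x) - 1); atol = 1e-6)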
+# Compare using approximator +mach.fitresult.fitresult[1](sol_bin_vars .* 1.0) +abs(value(obj[1]) - true_ob_value) / true_ob_value + +# Compare using foward pass +for i in 1:length(bin_vars_upper) + fix(bin_vars[i], sol_bin_vars[i]) +end + +JuMP.optimize!(model) +abs(objective_value(model) - true_ob_value) / true_ob_value \ No newline at end of file From 07e680e9fe6f2c46fb5e55318e23c490b0bc7d8c Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 11 Jan 2024 15:07:55 -0500 Subject: [PATCH 19/74] update code --- examples/powermodels/unitcommitment.jl | 33 ++++++++++++++++++++++++++ src/cutting_planes.jl | 20 +++++++++------- 2 files changed, 45 insertions(+), 8 deletions(-) diff --git a/examples/powermodels/unitcommitment.jl b/examples/powermodels/unitcommitment.jl index c818645..fb0980a 100644 --- a/examples/powermodels/unitcommitment.jl +++ b/examples/powermodels/unitcommitment.jl @@ -118,6 +118,28 @@ true_sol = value.(bin_vars) # Solve model using cutting plane algorithm # upper_model, lower_bound, upper_bound, gap = cutting_planes!(model; upper_solver, inner_solver) +############## +# Enhance dataset +############## +# Get upper model +inner_model = model +upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) +# delete binary constraints from inner model +delete_binary_terms!(inner_model; delete_objective=false) +# add deficit constraints +add_deficit_constraints!(inner_model) +# link binary variables from upper to inner model +upper_2_inner = fix_binary_variables!(inner_model, inner_2_upper_map) +u_inner = values(upper_2_inner) +set_optimizer(inner_model, inner_solver) +# Parameter values +num_s = 100 +parameter_values = Dict(u_inner .=> [rand([0;1],num_s) for i in 1:length(u_inner)]) + +# The iterator +problem_iterator = ProblemIterator(parameter_values) + + ############## # Fit DNN approximator ############## @@ -166,6 +188,12 @@ controls=[Step(1), WithIterationsDo(update_epochs) ] +function SobolevLoss_mse(x, y) + o_term = Flux.mse(x, y[:, 1]) + d_term = Flux.mse(gradient( ( _x ) -> sum(layer( _x )), x), y[:, 2:end]) + return o_term + d_term * 0.1 +end + iterated_pipe = IteratedModel(model=nn, controls=controls, @@ -182,6 +210,11 @@ predictions = convert.(Float64, predict(mach, X)) error = abs.(y .- predictions) ./ y mean(error) +# Derivatives +layer = mach.fitresult.fitresult[1] +gradient( ( _x ) -> sum(layer( _x )), X') + + ############## # Solve using DNN approximator ############## diff --git a/src/cutting_planes.jl b/src/cutting_planes.jl index 29c0125..95a998d 100644 --- a/src/cutting_planes.jl +++ b/src/cutting_planes.jl @@ -94,21 +94,25 @@ function copy_binary_model(m_from::JuMP.Model) end # remove binary terms -function delete_binary_terms!(m::JuMP.Model) - obj = objective_function(m) - for var in keys(obj.terms) - if is_binary(var) - println("deleting $var") - delete!(obj.terms, var) +function delete_binary_terms!(m::JuMP.Model; delete_objective=true) + if delete_objective + obj = objective_function(m) + for var in keys(obj.terms) + if is_binary(var) + println("deleting $var") + delete!(obj.terms, var) + end end + obj.constant = 0.0 + set_objective_function(m, obj) end - obj.constant = 0.0 - set_objective_function(m, obj) # remove binary constraints from the original model for con in all_binary_constraints(m) delete(m, con) end + + return end function add_deficit_constraints!(model::JuMP.Model; penalty=1e7) From 4ca80023e5205974acfffec5af09776723e53877 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 11 Jan 
2024 17:17:59 -0500 Subject: [PATCH 20/74] update code --- README.md | 4 ++-- .../unitcommitment.jl | 18 +++++++++++++++--- src/arrowrecorder.jl | 8 +------- src/csvrecorder.jl | 8 +------- src/datasetgen.jl | 12 +++++++++--- 5 files changed, 28 insertions(+), 22 deletions(-) rename examples/{powermodels => unitcommitment}/unitcommitment.jl (91%) diff --git a/README.md b/README.md index d0aedd3..562866c 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ problem_iterator = ProblemIterator(parameter_values) The parameter values of the problem iterator can be saved by simply: ```julia -save(problem_iterator, "input_file.csv", CSVFile) +save(problem_iterator, "input_file", CSVFile) ``` Which creates the following CSV: @@ -64,7 +64,7 @@ Then chose what values to record: recorder = Recorder{CSVFile}("output_file.csv", primal_variables=[x], dual_variables=[cons]) # Finally solve all problems described by the iterator -solve_batch(model, problem_iterator, recorder) +solve_batch(problem_iterator, recorder) ``` Which creates the following CSV: diff --git a/examples/powermodels/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl similarity index 91% rename from examples/powermodels/unitcommitment.jl rename to examples/unitcommitment/unitcommitment.jl index fb0980a..fb36a8f 100644 --- a/examples/powermodels/unitcommitment.jl +++ b/examples/unitcommitment/unitcommitment.jl @@ -28,6 +28,8 @@ import UnitCommitment: include("src/cutting_planes.jl") +data_dir = joinpath(pwd(), "examples/unitcommitment", "data") # joinpath(dirname(@__FILE__), "data") + ############## # Solver ############## @@ -38,11 +40,14 @@ upper_solver = Gurobi.Optimizer ############## # Load Instance ############## - +case_name = "case300" +date = "2017-01-01" +horizon = 2 +save_name = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) instance = UnitCommitment.read_benchmark( - "matpower/case300/2017-01-01", + joinpath("matpower", case_name, date), ) -instance.time = 2 +instance.time = horizon # Construct model (using state-of-the-art defaults) model = UnitCommitment.build_model( @@ -123,6 +128,7 @@ true_sol = value.(bin_vars) ############## # Get upper model inner_model = model +MOI.set(inner_model, Gurobi.CallbackFunction(), nothing) upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) # delete binary constraints from inner model delete_binary_terms!(inner_model; delete_objective=false) @@ -138,7 +144,13 @@ parameter_values = Dict(u_inner .=> [rand([0;1],num_s) for i in 1:length(u_inner # The iterator problem_iterator = ProblemIterator(parameter_values) +save(problem_iterator, joinpath(data_dir, "input_" * save_name), CSVFile) + +# CSV recorder to save the optimal primal and dual decision values +recorder = Recorder{CSVFile}(joinpath(data_dir, "output_" * save_name); model=inner_model) +# Finally solve all problems described by the iterator +solve_batch(problem_iterator, recorder) ############## # Fit DNN approximator diff --git a/src/arrowrecorder.jl b/src/arrowrecorder.jl index 9dcc4ed..e2d1618 100644 --- a/src/arrowrecorder.jl +++ b/src/arrowrecorder.jl @@ -12,13 +12,7 @@ function record(recorder::Recorder{ArrowFile}, id::UUID; input=false) _filename = _filename * "_$(string(id))." 
* string(ArrowFile) - model = if length(recorder.primal_variables) > 0 - owner_model(recorder.primal_variables[1]) - elseif length(recorder.dual_variables) > 0 - owner_model(recorder.dual_variables[1]) - else - @error("Recorder has no variables") - end + model = recorder.model df = (; id=[id], diff --git a/src/csvrecorder.jl b/src/csvrecorder.jl index f5284b1..f5fcf7b 100644 --- a/src/csvrecorder.jl +++ b/src/csvrecorder.jl @@ -41,13 +41,7 @@ function record(recorder::Recorder{CSVFile}, id::UUID; input=false) write(f, ",$val") end # save objective value - model = if length(recorder.primal_variables) > 0 - owner_model(recorder.primal_variables[1]) - elseif length(recorder.dual_variables) > 0 - owner_model(recorder.dual_variables[1]) - else - @error("Recorder has no variables") - end + model = recorder.model if !input # save objective value obj = JuMP.objective_value(model) diff --git a/src/datasetgen.jl b/src/datasetgen.jl index e97906d..69bbfd1 100644 --- a/src/datasetgen.jl +++ b/src/datasetgen.jl @@ -32,6 +32,7 @@ end Recorder of optimization problem solutions. """ mutable struct Recorder{T<:FileType} + model::JuMP.Model recorder_file::RecorderFile{T} recorder_file_input::RecorderFile{T} primal_variables::Vector @@ -44,6 +45,13 @@ mutable struct Recorder{T<:FileType} primal_variables=[], dual_variables=[], filterfn=filter_fn, + model= if length(primal_variables) > 0 + owner_model(primal_variables[1]) + elseif length(dual_variables) > 0 + owner_model(dual_variables[1]) + else + @error("No model provided") + end, ) where {T<:FileType} return new{T}( RecorderFile{T}(filename), @@ -121,9 +129,7 @@ function save( ) where {T<:FileType} kys = sort(collect(keys(problem_iterator.pairs)); by=(v) -> index(v).value) df = (; id=problem_iterator.ids,) - for ky in kys - df = merge(df, (; Symbol(ky) => problem_iterator.pairs[ky])) - end + df = merge(df, (; zip(Symbol.(kys), [problem_iterator.pairs[ky] for ky in kys])...)) save(df, filename, file_type) return nothing end From c9754d8345794e94e57481ad13c90c739763bcab Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 11 Jan 2024 17:30:54 -0500 Subject: [PATCH 21/74] fix bug --- src/datasetgen.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/datasetgen.jl b/src/datasetgen.jl index 69bbfd1..9859b54 100644 --- a/src/datasetgen.jl +++ b/src/datasetgen.jl @@ -54,6 +54,7 @@ mutable struct Recorder{T<:FileType} end, ) where {T<:FileType} return new{T}( + model, RecorderFile{T}(filename), RecorderFile{T}(filename_input), primal_variables, From a93470fb9baf4088967d5871e58555b94def41a8 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 11 Jan 2024 19:39:02 -0500 Subject: [PATCH 22/74] update code --- examples/unitcommitment/unitcommitment.jl | 79 +++++++++++++++++------ 1 file changed, 61 insertions(+), 18 deletions(-) diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl index fb36a8f..d41c7c1 100644 --- a/examples/unitcommitment/unitcommitment.jl +++ b/examples/unitcommitment/unitcommitment.jl @@ -19,6 +19,8 @@ using HiGHS, Gurobi using JuMP using UnitCommitment import ParametricOptInterface as POI +using DataFrames +using CSV import UnitCommitment: Formulation, @@ -43,7 +45,7 @@ upper_solver = Gurobi.Optimizer case_name = "case300" date = "2017-01-01" horizon = 2 -save_name = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) +save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) instance = UnitCommitment.read_benchmark( 
joinpath("matpower", case_name, date), ) @@ -57,13 +59,35 @@ model = UnitCommitment.build_model( # Set solver attributes set_optimizer_attribute(model, "PoolSearchMode", 2) -set_optimizer_attribute(model, "PoolSolutions", 100) +set_optimizer_attribute(model, "PoolSolutions", 300) ############## # Solve and store solutions ############## -bin_vars = all_binary_variables(model) +bin_vars = vcat( + collect(values(model[:is_on])), + collect(values(model[:startup])), + collect(values(model[:switch_on])), + collect(values(model[:switch_off])) +) +function tuple_2_name(smb) + str = string(smb[1]) + for i in 2:length(smb) + str = str * "_" * string(smb[i]) + end + return str +end +bin_vars_names = vcat( + "is_on_" .* tuple_2_name.(collect(keys(model[:is_on]))), + "startup_" .* tuple_2_name.(collect(keys(model[:startup]))), + "switch_on_" .* tuple_2_name.(collect(keys(model[:switch_on]))), + "switch_off_" .* tuple_2_name.(collect(keys(model[:switch_off]))) +) + +@assert all([is_binary(var) for var in bin_vars]) +@assert length(bin_vars) == length(all_binary_variables(model)) + obj_terms = objective_function(model).terms obj_terms_gurobi = [obj_terms[var] for var in all_variables(model) if haskey(obj_terms, var)] num_bin_var = length(bin_vars) @@ -130,38 +154,55 @@ true_sol = value.(bin_vars) inner_model = model MOI.set(inner_model, Gurobi.CallbackFunction(), nothing) upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) + # delete binary constraints from inner model delete_binary_terms!(inner_model; delete_objective=false) # add deficit constraints add_deficit_constraints!(inner_model) # link binary variables from upper to inner model upper_2_inner = fix_binary_variables!(inner_model, inner_2_upper_map) -u_inner = values(upper_2_inner) +# get parameters from inner model in the right order +u_inner = [upper_2_inner[inner_2_upper_map[var]] for var in bin_vars] +# set names +set_name.(u_inner, bin_vars_names) +# set solver set_optimizer(inner_model, inner_solver) # Parameter values -num_s = 100 -parameter_values = Dict(u_inner .=> [rand([0;1],num_s) for i in 1:length(u_inner)]) +num_s = 1000 +parameter_values = Dict(u_inner .=> [rand(num_s) for i in 1:length(u_inner)]) # The iterator problem_iterator = ProblemIterator(parameter_values) -save(problem_iterator, joinpath(data_dir, "input_" * save_name), CSVFile) - +input_file = "input_" * save_file +save(problem_iterator, joinpath(data_dir, input_file), CSVFile) +input_file = input_file * "." * string(CSVFile) # CSV recorder to save the optimal primal and dual decision values -recorder = Recorder{CSVFile}(joinpath(data_dir, "output_" * save_name); model=inner_model) +output_file = "output_" * save_file +recorder = Recorder{CSVFile}(joinpath(data_dir, output_file); model=inner_model) +output_file = output_file * "." 
* string(CSVFile) # Finally solve all problems described by the iterator solve_batch(problem_iterator, recorder) +# Read input and output data +input_data = CSV.read(joinpath(data_dir, input_file), DataFrame) +output_data = CSV.read(joinpath(data_dir, output_file), DataFrame) + +# Separate input and output variables +output_variables = output_data[!, :objective] +input_features = innerjoin(input_data, output_data[!, [:id]], on = :id)[!, Symbol.(bin_vars_names)] # just use success solves + + ############## # Fit DNN approximator ############## # optimiser=Flux.Optimise.Adam() optimiser=ConvexRule( - Flux.Optimise.Adam(0.001, (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) + Flux.Optimise.Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) ) nn = MultitargetNeuralNetworkRegressor(; - builder=FullyConnectedBuilder([1024, 300, 64, 32]), # [1024, 300, 64, 32] + builder=FullyConnectedBuilder([1024, 1024, 300, 64, 32]), # [1024, 300, 64, 32] rng=123, epochs=100, optimiser=optimiser, @@ -169,11 +210,14 @@ nn = MultitargetNeuralNetworkRegressor(; batch_size=24, ) -# Define the machine +# Data X = hcat(my_storage_vars...)'[:,:] y = convert.(Float64, my_storage_obj[:,:]) -# constrols +X = vcat(X, Matrix(input_features)) +y = vcat(y, output_variables) + +# Controls clear() = begin global losses = [] @@ -200,6 +244,7 @@ controls=[Step(1), WithIterationsDo(update_epochs) ] +# WIP function SobolevLoss_mse(x, y) o_term = Flux.mse(x, y[:, 1]) d_term = Flux.mse(gradient( ( _x ) -> sum(layer( _x )), x), y[:, 2:end]) @@ -209,10 +254,11 @@ end iterated_pipe = IteratedModel(model=nn, controls=controls, - resampling=Holdout(fraction_train=0.7), - measure = Flux.mse, + resampling=Holdout(fraction_train=0.8), + measure = l2, ) +# Fit model clear() mach = machine(iterated_pipe, X, y) fit!(mach; verbosity=2) @@ -238,9 +284,6 @@ function NNlib.relu(ex::AffExpr) return relu_out end -# Get upper model -upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(model) - # add nn to model bin_vars_upper = [inner_2_upper_map[var] for var in bin_vars] obj = mach.fitresult.fitresult[1](bin_vars_upper) From 6058b2d1d5267cd0a62e0de10f77c5e760a9f3ef Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Fri, 12 Jan 2024 12:58:57 -0500 Subject: [PATCH 23/74] update to save data --- examples/unitcommitment/unitcommitment.jl | 27 ++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl index d41c7c1..08528e1 100644 --- a/examples/unitcommitment/unitcommitment.jl +++ b/examples/unitcommitment/unitcommitment.jl @@ -15,12 +15,12 @@ using L2O using JuMP using CUDA using Logging -using HiGHS, Gurobi using JuMP using UnitCommitment import ParametricOptInterface as POI using DataFrames using CSV +using UUIDs import UnitCommitment: Formulation, @@ -141,6 +141,27 @@ MOI.set(model, Gurobi.CallbackFunction(), my_callback_function) UnitCommitment.optimize!(model) is_relaxed = findall(x -> x == 1, is_relaxed) +# Data +X = hcat(my_storage_vars...)'[:,:] +y = convert.(Float64, my_storage_obj[:,:]) + +batch_id = uuid1() + +# Save solutions +instances_ids = [uuid1() for i in 1:length(my_storage_vars)] +df_in = DataFrame(Dict(Symbol.(bin_vars_names) .=> eachcol(X))) +df_in.id = instances_ids +df_out = DataFrame(Dict(:objective => y[:,1])) +df_out.id = instances_ids +# time,status,primal_status,dual_status +df_out.time = fill(0.0, length(instances_ids)) +df_out.status = fill("OPTIMAL", length(instances_ids)) +df_out.primal_status = 
fill("FEASIBLE_POINT", length(instances_ids)) +df_out.dual_status = fill("FEASIBLE_POINT", length(instances_ids)) + +CSV.write(joinpath(data_dir, save_file * "_input_" * string(batch_id) * ".csv"), df_in) +CSV.write(joinpath(data_dir, save_file * "_output_" * string(batch_id) * ".csv"), df_out) + true_ob_value = objective_value(model) true_sol = value.(bin_vars) @@ -210,10 +231,6 @@ nn = MultitargetNeuralNetworkRegressor(; batch_size=24, ) -# Data -X = hcat(my_storage_vars...)'[:,:] -y = convert.(Float64, my_storage_obj[:,:]) - X = vcat(X, Matrix(input_features)) y = vcat(y, output_variables) From bf6384cfe56cf1e40462a1065bf51ae63c0b36e5 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Fri, 12 Jan 2024 13:08:59 -0500 Subject: [PATCH 24/74] add to git ignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 6cc3760..1eeca09 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ Manifest.toml *.arrow *.m *.csv +.DS_Store From 250d0f20e546674c8ae86700f4543c073a8346c5 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Thu, 18 Jan 2024 17:56:52 -0500 Subject: [PATCH 25/74] update --- examples/unitcommitment/bnb_dataset.jl | 233 ++++++++++++++++++++++ examples/unitcommitment/unitcommitment.jl | 199 ++---------------- 2 files changed, 245 insertions(+), 187 deletions(-) create mode 100644 examples/unitcommitment/bnb_dataset.jl diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl new file mode 100644 index 0000000..56d9c56 --- /dev/null +++ b/examples/unitcommitment/bnb_dataset.jl @@ -0,0 +1,233 @@ +############## +# Load Packages +############## +# Data Generation +using LinearAlgebra +using Gurobi +using L2O +using JuMP +using Logging +using JuMP +using UnitCommitment +import ParametricOptInterface as POI +using DataFrames +using CSV +using UUIDs +using Arrow +using SparseArrays + +import UnitCommitment: + Formulation, + KnuOstWat2018, + MorLatRam2013, + ShiftFactorsFormulation + +function uc_load_disturbances!(instance; load_disturbances_range=-20:20) + for bus in instance.buses + bus.load = bus.load .+ rand(load_disturbances_range) + bus.load = max.(bus.load, 0.0) + end +end + +""" + Build model +""" +function build_model_uc(instance; solver=Gurobi.Optimizer, PoolSearchMode=2, PoolSolutions=100) + # Construct model (using state-of-the-art defaults) + model = UnitCommitment.build_model( + instance = instance, + optimizer = solver, + ) + + # Set solver attributes + if !isnothing(PoolSearchMode) + set_optimizer_attribute(model, "PoolSearchMode", PoolSearchMode) + set_optimizer_attribute(model, "PoolSolutions", PoolSolutions) + end + + return model +end + + +function tuple_2_name(smb) + str = string(smb[1]) + for i in 2:length(smb) + str = str * "_" * string(smb[i]) + end + return str +end + +function bin_variables_retriever(model) + bin_vars = vcat( + collect(values(model[:is_on])), + collect(values(model[:startup])), + collect(values(model[:switch_on])), + collect(values(model[:switch_off])) + ) + + bin_vars_names = vcat( + "is_on_" .* tuple_2_name.(collect(keys(model[:is_on]))), + "startup_" .* tuple_2_name.(collect(keys(model[:startup]))), + "switch_on_" .* tuple_2_name.(collect(keys(model[:switch_on]))), + "switch_off_" .* tuple_2_name.(collect(keys(model[:switch_off]))) + ) + return bin_vars, bin_vars_names +end + +""" + Build branch and bound dataset +""" +function uc_bnb_dataset(model, save_file; data_dir=pwd(), batch_id = uuid1(), filetype=ArrowFile) + ############## + # Solve and store 
solutions + ############## + + bin_vars, bin_vars_names = bin_variables_retriever(model) + + @assert all([is_binary(var) for var in bin_vars]) + @assert length(bin_vars) == length(all_binary_variables(model)) + + obj_terms = objective_function(model).terms + obj_terms_gurobi = [obj_terms[var] for var in all_variables(model) if haskey(obj_terms, var)] + num_bin_var = length(bin_vars) + num_all_var = num_variables(model) + + global my_storage_vars = [] + global my_storage_obj = [] + global is_relaxed = [] + global non_optimals = [] + function my_callback_function(cb_data, cb_where::Cint) + # You can select where the callback is run + if cb_where == GRB_CB_MIPNODE + resultobj = Ref{Cint}() + GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_STATUS, resultobj) + if resultobj[] != GRB_OPTIMAL + push!(non_optimals, resultobj[]) + return # Solution is something other than optimal. + end + gurobi_indexes_all = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[var.index]) for var in all_variables(model) if haskey(obj_terms, var)] + gurobi_indexes_bin = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[bin_vars[i].index]) for i in 1:length(bin_vars)] + resultP = Vector{Cdouble}(undef, num_all_var) + GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_REL, resultP) + push!(my_storage_vars, resultP[gurobi_indexes_bin]) + # Get the objective value + push!(my_storage_obj, dot(obj_terms_gurobi, resultP[gurobi_indexes_all])) + # mark as relaxed + push!(is_relaxed, 1) + return + end + if cb_where == GRB_CB_MIPSOL + # Before querying `callback_value`, you must call: + Gurobi.load_callback_variable_primal(cb_data, cb_where) + # Get the values of the variables + x = [callback_value(cb_data, var) for var in bin_vars] + # push + push!(my_storage_vars, x) + # Get the objective value + obj = Ref{Cdouble}() + GRBcbget(cb_data, cb_where, GRB_CB_MIPSOL_OBJ, obj) + # push + push!(my_storage_obj, obj[]) + # mark as not relaxed + push!(is_relaxed, 0) + return + end + return + end + MOI.set(model, Gurobi.CallbackFunction(), my_callback_function) + + # JuMP.optimize!(model) + UnitCommitment.optimize!(model) + # push optimal solution + x = [value(var) for var in bin_vars] + push!(my_storage_vars, x) + optimal_obj = objective_value(model) + push!(my_storage_obj, optimal_obj) + # mark as not relaxed + push!(is_relaxed, 0) + + is_relaxed = findall(x -> x == 1, is_relaxed) + + # Data + X = hcat(my_storage_vars...)'[:,:] + y = convert.(Float64, my_storage_obj[:,:]) + + # Save solutions + # Input + instances_ids = [uuid1() for i in 1:length(my_storage_vars)] + df_in = DataFrame(X, Symbol.(bin_vars_names)) + df_in.id = instances_ids + # Save Loads + for bus in instance.buses + for t in 1:instance.time + df_in[!, Symbol("load_" * string(bus.name) * "_" * string(t))] = fill(bus.load[t], length(instances_ids)) + end + end + # Output + df_out = DataFrame(Dict(:objective => y[:,1])) + df_out.id = instances_ids + df_out.time = fill(solve_time(model), length(instances_ids)) + df_out.status = fill("LOCALLY_SOLVED", length(instances_ids)) + df_out.primal_status = fill("FEASIBLE_POINT", length(instances_ids)) + df_out.dual_status = fill("FEASIBLE_POINT", length(instances_ids)) + # mark as optimal + df_out.status[end] = "OPTIMAL" + # mark as relaxed + df_out.status[is_relaxed] = fill("INFEASIBLE", length(is_relaxed)) + df_out.primal_status[is_relaxed] = fill("INFEASIBLE_POINT", length(is_relaxed)) + df_out.dual_status[is_relaxed] = fill("INFEASIBLE_POINT", length(is_relaxed)) + + 
# Save + if filetype === ArrowFile + Arrow.write(joinpath(data_dir, save_file * "_input_" * string(batch_id) * ".arrow"), df_in) + Arrow.write(joinpath(data_dir, save_file * "_output_" * string(batch_id) * ".arrow"), df_out) + else + CSV.write(joinpath(data_dir, save_file * "_input_" * string(batch_id) * ".csv"), df_in) + CSV.write(joinpath(data_dir, save_file * "_output_" * string(batch_id) * ".csv"), df_out) + end + + @info "Saved dataset to $(data_dir)" batch_id length(instances_ids) length(is_relaxed) optimal_obj + + return + +end + +""" + Enhance dataset +""" +function uc_random_dataset!(inner_model, save_file; delete_objective=false, inner_solver=() -> POI.Optimizer(Gurobi.Optimizer()), data_dir=pwd(), filetype=ArrowFile, num_s = 1000, non_zero_units = 0.3) + MOI.set(inner_model, Gurobi.CallbackFunction(), nothing) + bin_vars, bin_vars_names = bin_variables_retriever(inner_model) + # Remove binary constraints + upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) + + # delete binary constraints from inner model + delete_binary_terms!(inner_model; delete_objective=delete_objective) + # add deficit constraints + add_deficit_constraints!(inner_model) + # link binary variables from upper to inner model + upper_2_inner = fix_binary_variables!(inner_model, inner_2_upper_map) + # get parameters from inner model in the right order + u_inner = [upper_2_inner[inner_2_upper_map[var]] for var in bin_vars] + # set names + set_name.(u_inner, bin_vars_names) + # set solver + set_optimizer(inner_model, inner_solver) + # Parameter values + u_values = abs.(Matrix(hcat([sprandn(length(u_inner), 1, non_zero_units) for i in 1:num_s]...)')) + u_values = min.(u_values, 1.0) + parameter_values = Dict(u_inner .=> eachcol(u_values)) + + # The iterator + problem_iterator = ProblemIterator(parameter_values) + input_file = "input_" * save_file + save(problem_iterator, joinpath(data_dir, input_file), filetype) + input_file = input_file * "." * string(filetype) + # CSV recorder to save the optimal primal and dual decision values + output_file = "output_" * save_file + recorder = Recorder{filetype}(joinpath(data_dir, output_file); model=inner_model) + output_file = output_file * "." 
* string(filetype) + + # Finally solve all problems described by the iterator + solve_batch(problem_iterator, recorder) +end \ No newline at end of file diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl index 08528e1..3ad34a0 100644 --- a/examples/unitcommitment/unitcommitment.jl +++ b/examples/unitcommitment/unitcommitment.jl @@ -3,207 +3,37 @@ ################################################### ############## -# Load Packages +# Load Functions ############## -using LinearAlgebra -using MLJFlux -using Gurobi -using Flux -using MLJ -using L2O -using JuMP -using CUDA -using Logging -using JuMP -using UnitCommitment -import ParametricOptInterface as POI -using DataFrames -using CSV -using UUIDs - -import UnitCommitment: - Formulation, - KnuOstWat2018, - MorLatRam2013, - ShiftFactorsFormulation +include("examples/unitcommitment/bnb_dataset.jl") include("src/cutting_planes.jl") data_dir = joinpath(pwd(), "examples/unitcommitment", "data") # joinpath(dirname(@__FILE__), "data") -############## -# Solver -############## - -inner_solver = () -> POI.Optimizer(Gurobi.Optimizer()) -upper_solver = Gurobi.Optimizer - ############## # Load Instance ############## case_name = "case300" date = "2017-01-01" -horizon = 2 -save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) +instance.time = 2 +save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(instance.time) instance = UnitCommitment.read_benchmark( joinpath("matpower", case_name, date), ) -instance.time = horizon - -# Construct model (using state-of-the-art defaults) -model = UnitCommitment.build_model( - instance = instance, - optimizer = upper_solver, -) - -# Set solver attributes -set_optimizer_attribute(model, "PoolSearchMode", 2) -set_optimizer_attribute(model, "PoolSolutions", 300) - -############## -# Solve and store solutions -############## - -bin_vars = vcat( - collect(values(model[:is_on])), - collect(values(model[:startup])), - collect(values(model[:switch_on])), - collect(values(model[:switch_off])) -) -function tuple_2_name(smb) - str = string(smb[1]) - for i in 2:length(smb) - str = str * "_" * string(smb[i]) - end - return str -end -bin_vars_names = vcat( - "is_on_" .* tuple_2_name.(collect(keys(model[:is_on]))), - "startup_" .* tuple_2_name.(collect(keys(model[:startup]))), - "switch_on_" .* tuple_2_name.(collect(keys(model[:switch_on]))), - "switch_off_" .* tuple_2_name.(collect(keys(model[:switch_off]))) -) - -@assert all([is_binary(var) for var in bin_vars]) -@assert length(bin_vars) == length(all_binary_variables(model)) - -obj_terms = objective_function(model).terms -obj_terms_gurobi = [obj_terms[var] for var in all_variables(model) if haskey(obj_terms, var)] -num_bin_var = length(bin_vars) -num_all_var = num_variables(model) -global my_storage_vars = [] -global my_storage_obj = [] -global is_relaxed = [] -global non_optimals = [] -function my_callback_function(cb_data, cb_where::Cint) - # You can select where the callback is run - if cb_where == GRB_CB_MIPNODE - resultobj = Ref{Cint}() - GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_STATUS, resultobj) - if resultobj[] != GRB_OPTIMAL - push!(non_optimals, resultobj[]) - return # Solution is something other than optimal. 
- end - gurobi_indexes_all = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[var.index]) for var in all_variables(model) if haskey(obj_terms, var)] - gurobi_indexes_bin = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[bin_vars[i].index]) for i in 1:length(bin_vars)] - resultP = Vector{Cdouble}(undef, num_all_var) - GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_REL, resultP) - push!(my_storage_vars, resultP[gurobi_indexes_bin]) - # Get the objective value - push!(my_storage_obj, dot(obj_terms_gurobi, resultP[gurobi_indexes_all])) - # mark as relaxed - push!(is_relaxed, 1) - return - end - if cb_where == GRB_CB_MIPSOL - # Before querying `callback_value`, you must call: - Gurobi.load_callback_variable_primal(cb_data, cb_where) - # Get the values of the variables - x = [callback_value(cb_data, var) for var in bin_vars] - # push - push!(my_storage_vars, x) - # Get the objective value - obj = Ref{Cdouble}() - GRBcbget(cb_data, cb_where, GRB_CB_MIPSOL_OBJ, obj) - # push - push!(my_storage_obj, obj[]) - # mark as not relaxed - push!(is_relaxed, 0) - return - end - return -end -MOI.set(model, Gurobi.CallbackFunction(), my_callback_function) - -# JuMP.optimize!(model) -UnitCommitment.optimize!(model) -is_relaxed = findall(x -> x == 1, is_relaxed) - -# Data -X = hcat(my_storage_vars...)'[:,:] -y = convert.(Float64, my_storage_obj[:,:]) - -batch_id = uuid1() - -# Save solutions -instances_ids = [uuid1() for i in 1:length(my_storage_vars)] -df_in = DataFrame(Dict(Symbol.(bin_vars_names) .=> eachcol(X))) -df_in.id = instances_ids -df_out = DataFrame(Dict(:objective => y[:,1])) -df_out.id = instances_ids -# time,status,primal_status,dual_status -df_out.time = fill(0.0, length(instances_ids)) -df_out.status = fill("OPTIMAL", length(instances_ids)) -df_out.primal_status = fill("FEASIBLE_POINT", length(instances_ids)) -df_out.dual_status = fill("FEASIBLE_POINT", length(instances_ids)) - -CSV.write(joinpath(data_dir, save_file * "_input_" * string(batch_id) * ".csv"), df_in) -CSV.write(joinpath(data_dir, save_file * "_output_" * string(batch_id) * ".csv"), df_out) - -true_ob_value = objective_value(model) -true_sol = value.(bin_vars) - -# Solve model using cutting plane algorithm -# upper_model, lower_bound, upper_bound, gap = cutting_planes!(model; upper_solver, inner_solver) +model = build_model_uc(instance) +uc_bnb_dataset(model, save_file; data_dir=data_dir) +uc_random_dataset!(model, save_file; data_dir=data_dir) ############## -# Enhance dataset +# Fit DNN approximator ############## -# Get upper model -inner_model = model -MOI.set(inner_model, Gurobi.CallbackFunction(), nothing) -upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) - -# delete binary constraints from inner model -delete_binary_terms!(inner_model; delete_objective=false) -# add deficit constraints -add_deficit_constraints!(inner_model) -# link binary variables from upper to inner model -upper_2_inner = fix_binary_variables!(inner_model, inner_2_upper_map) -# get parameters from inner model in the right order -u_inner = [upper_2_inner[inner_2_upper_map[var]] for var in bin_vars] -# set names -set_name.(u_inner, bin_vars_names) -# set solver -set_optimizer(inner_model, inner_solver) -# Parameter values -num_s = 1000 -parameter_values = Dict(u_inner .=> [rand(num_s) for i in 1:length(u_inner)]) - -# The iterator -problem_iterator = ProblemIterator(parameter_values) -input_file = "input_" * save_file -save(problem_iterator, 
joinpath(data_dir, input_file), CSVFile) -input_file = input_file * "." * string(CSVFile) -# CSV recorder to save the optimal primal and dual decision values -output_file = "output_" * save_file -recorder = Recorder{CSVFile}(joinpath(data_dir, output_file); model=inner_model) -output_file = output_file * "." * string(CSVFile) - -# Finally solve all problems described by the iterator -solve_batch(problem_iterator, recorder) +using MLJFlux +using CUDA +using Flux +using MLJ # Read input and output data input_data = CSV.read(joinpath(data_dir, input_file), DataFrame) @@ -213,11 +43,6 @@ output_data = CSV.read(joinpath(data_dir, output_file), DataFrame) output_variables = output_data[!, :objective] input_features = innerjoin(input_data, output_data[!, [:id]], on = :id)[!, Symbol.(bin_vars_names)] # just use success solves - -############## -# Fit DNN approximator -############## - # optimiser=Flux.Optimise.Adam() optimiser=ConvexRule( Flux.Optimise.Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) From 7417ae6421ba872107d07003c60497a30969cee7 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Fri, 19 Jan 2024 09:00:36 -0500 Subject: [PATCH 26/74] update --- examples/unitcommitment/bnb_dataset.jl | 46 ++++++++++----- .../unitcommitment/uc_dataset_generation.jl | 58 +++++++++++++++++++ examples/unitcommitment/unitcommitment.jl | 8 --- 3 files changed, 90 insertions(+), 22 deletions(-) create mode 100644 examples/unitcommitment/uc_dataset_generation.jl diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl index 56d9c56..c0dc9a0 100644 --- a/examples/unitcommitment/bnb_dataset.jl +++ b/examples/unitcommitment/bnb_dataset.jl @@ -15,6 +15,7 @@ using CSV using UUIDs using Arrow using SparseArrays +using Statistics import UnitCommitment: Formulation, @@ -22,9 +23,9 @@ import UnitCommitment: MorLatRam2013, ShiftFactorsFormulation -function uc_load_disturbances!(instance; load_disturbances_range=-20:20) +function uc_load_disturbances!(instance, nominal_loads; load_disturbances_range=-20:20) for bus in instance.buses - bus.load = bus.load .+ rand(load_disturbances_range) + bus.load = nominal_loads .+ rand(load_disturbances_range) bus.load = max.(bus.load, 0.0) end end @@ -77,7 +78,7 @@ end """ Build branch and bound dataset """ -function uc_bnb_dataset(model, save_file; data_dir=pwd(), batch_id = uuid1(), filetype=ArrowFile) +function uc_bnb_dataset(instance, save_file; model=build_model_uc(instance), data_dir=pwd(), batch_id = uuid1(), filetype=ArrowFile) ############## # Solve and store solutions ############## @@ -195,37 +196,54 @@ end """ Enhance dataset """ -function uc_random_dataset!(inner_model, save_file; delete_objective=false, inner_solver=() -> POI.Optimizer(Gurobi.Optimizer()), data_dir=pwd(), filetype=ArrowFile, num_s = 1000, non_zero_units = 0.3) - MOI.set(inner_model, Gurobi.CallbackFunction(), nothing) - bin_vars, bin_vars_names = bin_variables_retriever(inner_model) +function uc_random_dataset!(instance, save_file; model=build_model_uc(instance), delete_objective=false, inner_solver=() -> POI.Optimizer(Gurobi.Optimizer()), data_dir=pwd(), filetype=ArrowFile, num_s = 1000, non_zero_units = 0.15) + MOI.set(model, Gurobi.CallbackFunction(), nothing) + bin_vars, bin_vars_names = bin_variables_retriever(model) # Remove binary constraints - upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(inner_model) + upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(model) # delete binary constraints from inner model - 
delete_binary_terms!(inner_model; delete_objective=delete_objective) + delete_binary_terms!(model; delete_objective=delete_objective) # add deficit constraints - add_deficit_constraints!(inner_model) + add_deficit_constraints!(model) # link binary variables from upper to inner model - upper_2_inner = fix_binary_variables!(inner_model, inner_2_upper_map) + upper_2_inner = fix_binary_variables!(model, inner_2_upper_map) # get parameters from inner model in the right order u_inner = [upper_2_inner[inner_2_upper_map[var]] for var in bin_vars] # set names set_name.(u_inner, bin_vars_names) # set solver - set_optimizer(inner_model, inner_solver) + set_optimizer(model, inner_solver) # Parameter values u_values = abs.(Matrix(hcat([sprandn(length(u_inner), 1, non_zero_units) for i in 1:num_s]...)')) u_values = min.(u_values, 1.0) - parameter_values = Dict(u_inner .=> eachcol(u_values)) - + units_on = sum(u_values, dims=1) + @info "Number of units on: " mean(units_on) std(units_on) minimum(units_on) maximum(units_on) + parameter_values = Dict(u_inner .=> Array.(eachcol(u_values))) # The iterator problem_iterator = ProblemIterator(parameter_values) input_file = "input_" * save_file save(problem_iterator, joinpath(data_dir, input_file), filetype) input_file = input_file * "." * string(filetype) + # Add load data to input file + df_in = if filetype === ArrowFile + Arrow.Table(input_file) |> DataFrame + else + CSV.read(joinpath(data_dir, input_file), DataFrame) + end + for bus in instance.buses + for t in 1:instance.time + df_in[!, Symbol("load_" * string(bus.name) * "_" * string(t))] = fill(bus.load[t], length(df_in.id)) + end + end + if filetype === ArrowFile + Arrow.write(joinpath(data_dir, input_file), df_in) + else + CSV.write(joinpath(data_dir, input_file), df_in) + end # CSV recorder to save the optimal primal and dual decision values output_file = "output_" * save_file - recorder = Recorder{filetype}(joinpath(data_dir, output_file); model=inner_model) + recorder = Recorder{filetype}(joinpath(data_dir, output_file); model=model) output_file = output_file * "." 
* string(filetype) # Finally solve all problems described by the iterator diff --git a/examples/unitcommitment/uc_dataset_generation.jl b/examples/unitcommitment/uc_dataset_generation.jl new file mode 100644 index 0000000..f44a7f0 --- /dev/null +++ b/examples/unitcommitment/uc_dataset_generation.jl @@ -0,0 +1,58 @@ +################################################################ +############## Unit Commitment Dataset Generation ############## +################################################################ + +############## +# Load Functions +############## + +include("examples/unitcommitment/bnb_dataset.jl") + +include("src/cutting_planes.jl") + +data_dir = joinpath(pwd(), "examples/unitcommitment", "data") # joinpath(dirname(@__FILE__), "data") + +############## +# Parameters +############## +case_name = "case300" +date = "2017-01-01" +save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(instance.time) +num_batches = 10 +horizon = 2 +solve_nominal = true + +@info "Case: $case_name, Date: $date, Horizon: $horizon" num_batches solve_nominal + +############## +# Load Instance +############## +instance = UnitCommitment.read_benchmark( + joinpath("matpower", case_name, date), +) +instance.time = horizon + +############## +# Solve and store solutions +############## + +if solve_nominal + model = build_model_uc(instance) + uc_bnb_dataset(instance, save_file; data_dir=data_dir, model=model) + uc_random_dataset!(instance, save_file; data_dir=data_dir, model=model) +end + +# save nominal loads in a dictionary +nominal_loads = Dict() +for i in 1:length(instance.bus) + bus = instance.bus[i] + nominal_loads[i] = bus.load[1:horizon] +end + +for _ in 1:num_batches + # perturb loads + uc_load_disturbances!(instance, nominal_loads) + model = build_model_uc(instance) + uc_bnb_dataset(instance, save_file; data_dir=data_dir, model=model) + uc_random_dataset!(instance, save_file; data_dir=data_dir, model=model) +end diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl index 3ad34a0..ba89d3f 100644 --- a/examples/unitcommitment/unitcommitment.jl +++ b/examples/unitcommitment/unitcommitment.jl @@ -17,15 +17,7 @@ data_dir = joinpath(pwd(), "examples/unitcommitment", "data") # joinpath(dirname ############## case_name = "case300" date = "2017-01-01" -instance.time = 2 save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(instance.time) -instance = UnitCommitment.read_benchmark( - joinpath("matpower", case_name, date), -) - -model = build_model_uc(instance) -uc_bnb_dataset(model, save_file; data_dir=data_dir) -uc_random_dataset!(model, save_file; data_dir=data_dir) ############## # Fit DNN approximator From 538b81fae4f4814c804409de9362c2c9e7727eb8 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 19 Jan 2024 15:14:36 -0500 Subject: [PATCH 27/74] update --- examples/unitcommitment/slurm/slurm.jl | 29 +++++++++++++++++++ .../unitcommitment/uc_dataset_generation.jl | 10 ++++--- 2 files changed, 35 insertions(+), 4 deletions(-) create mode 100644 examples/unitcommitment/slurm/slurm.jl diff --git a/examples/unitcommitment/slurm/slurm.jl b/examples/unitcommitment/slurm/slurm.jl new file mode 100644 index 0000000..1df36d2 --- /dev/null +++ b/examples/unitcommitment/slurm/slurm.jl @@ -0,0 +1,29 @@ +#= Julia code for launching jobs on the slurm cluster. + +This code is expected to be run from an sbatch script after a module load julia command has been run. 
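+ +For example (illustrative invocation, not part of the original script): julia slurm.jl <job_file_dir> uc_dataset_generation.jl, where ARGS[1] is the job-file location handed to SlurmManager and ARGS[2] is the driver script included on the master once the workers are connected.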
+It starts the remote processes with srun within an allocation. +If you get an error make sure to Pkg.checkout("CluterManagers"). + +=# +try + + using Distributed, ClusterManagers +catch + Pkg.add("ClusterManagers") + Pkg.checkout("ClusterManagers") +end + +using Distributed, ClusterManagers + +np = 4 # +addprocs(SlurmManager(np), job_file_loc = ARGS[1]) + +println("We are all connected and ready.") + +include(ARGS[2]) + +# The Slurm resource allocation is released when all the workers have +# exited +for i in workers() + rmprocs(i) +end \ No newline at end of file diff --git a/examples/unitcommitment/uc_dataset_generation.jl b/examples/unitcommitment/uc_dataset_generation.jl index f44a7f0..ef5d5d1 100644 --- a/examples/unitcommitment/uc_dataset_generation.jl +++ b/examples/unitcommitment/uc_dataset_generation.jl @@ -2,15 +2,17 @@ ############## Unit Commitment Dataset Generation ############## ################################################################ +using Distributed + ############## # Load Functions ############## -include("examples/unitcommitment/bnb_dataset.jl") +@everywhere include(joinpath(dirname(@__FILE__), "bnb_dataset.jl")) -include("src/cutting_planes.jl") +@everywhere include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl")) -data_dir = joinpath(pwd(), "examples/unitcommitment", "data") # joinpath(dirname(@__FILE__), "data") +data_dir = joinpath(dirname(@__FILE__), "data") # joinpath(pwd(), "examples/unitcommitment", "data") ############## # Parameters @@ -49,7 +51,7 @@ for i in 1:length(instance.bus) nominal_loads[i] = bus.load[1:horizon] end -for _ in 1:num_batches +@distributed for _ in 1:num_batches # perturb loads uc_load_disturbances!(instance, nominal_loads) model = build_model_uc(instance) From 2196642e27938e5438e068f7bb7e0f0a62c4454c Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 19 Jan 2024 19:06:14 -0500 Subject: [PATCH 28/74] update --- .gitignore | 3 ++ examples/unitcommitment/Project.toml | 14 ++++++++ examples/unitcommitment/bnb_dataset.jl | 10 +++--- examples/unitcommitment/slurm/slurm.jl | 35 ++++++++++++++++++- .../unitcommitment/uc_dataset_generation.jl | 22 +++++++----- 5 files changed, 70 insertions(+), 14 deletions(-) create mode 100644 examples/unitcommitment/Project.toml diff --git a/.gitignore b/.gitignore index 1eeca09..913a10d 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,6 @@ Manifest.toml *.m *.csv .DS_Store +*.out +*.sbatch +examples/unitcommitment/app/* diff --git a/examples/unitcommitment/Project.toml b/examples/unitcommitment/Project.toml new file mode 100644 index 0000000..c4201b6 --- /dev/null +++ b/examples/unitcommitment/Project.toml @@ -0,0 +1,14 @@ +[deps] +Arrow = "69666777-d1a9-59fb-9406-91d4454c9d45" +CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" +DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" +Gurobi = "2e9cd046-0924-5485-92f1-d5272153d98b" +JuMP = "4076af6c-e467-56ae-b986-b466b2749572" +L2O = "e1d8bfa7-c465-446a-84b9-451470f6e76c" +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" +ParametricOptInterface = "0ce4ce61-57bf-432b-a095-efac525d185e" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" +UnitCommitment = "64606440-39ea-11e9-0f29-3303a1d3d877" diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl index c0dc9a0..d2bd1c1 100644 --- a/examples/unitcommitment/bnb_dataset.jl +++ 
b/examples/unitcommitment/bnb_dataset.jl @@ -1,7 +1,7 @@ ############## # Load Packages ############## -# Data Generation + using LinearAlgebra using Gurobi using L2O @@ -196,7 +196,7 @@ end """ Enhance dataset """ -function uc_random_dataset!(instance, save_file; model=build_model_uc(instance), delete_objective=false, inner_solver=() -> POI.Optimizer(Gurobi.Optimizer()), data_dir=pwd(), filetype=ArrowFile, num_s = 1000, non_zero_units = 0.15) +function uc_random_dataset!(instance, save_file; model=build_model_uc(instance), delete_objective=false, inner_solver=() -> POI.Optimizer(Gurobi.Optimizer()), data_dir=pwd(), filetype=ArrowFile, num_s = 1000, non_zero_units = 0.15, batch_id = uuid1()) MOI.set(model, Gurobi.CallbackFunction(), nothing) bin_vars, bin_vars_names = bin_variables_retriever(model) # Remove binary constraints @@ -222,12 +222,12 @@ function uc_random_dataset!(instance, save_file; model=build_model_uc(instance), parameter_values = Dict(u_inner .=> Array.(eachcol(u_values))) # The iterator problem_iterator = ProblemIterator(parameter_values) - input_file = "input_" * save_file + input_file = save_file * "_input_" * string(batch_id) save(problem_iterator, joinpath(data_dir, input_file), filetype) input_file = input_file * "." * string(filetype) # Add load data to input file df_in = if filetype === ArrowFile - Arrow.Table(input_file) |> DataFrame + Arrow.Table(joinpath(data_dir, input_file)) |> DataFrame else CSV.read(joinpath(data_dir, input_file), DataFrame) end @@ -242,7 +242,7 @@ function uc_random_dataset!(instance, save_file; model=build_model_uc(instance), CSV.write(joinpath(data_dir, input_file), df_in) end # CSV recorder to save the optimal primal and dual decision values - output_file = "output_" * save_file + output_file = save_file * "_output_" * string(batch_id) recorder = Recorder{filetype}(joinpath(data_dir, output_file); model=model) output_file = output_file * "." * string(filetype) diff --git a/examples/unitcommitment/slurm/slurm.jl b/examples/unitcommitment/slurm/slurm.jl index 1df36d2..759a4b7 100644 --- a/examples/unitcommitment/slurm/slurm.jl +++ b/examples/unitcommitment/slurm/slurm.jl @@ -4,6 +4,39 @@ This code is expected to be run from an sbatch script after a module load julia It starts the remote processes with srun within an allocation. If you get an error make sure to Pkg.checkout("CluterManagers"). +== + +# Make sure to compile a julia image before: + +1) +julia --project=. 
-t1 --trace-compile=app/precompile.jl uc_dataset_generation.jl "dash" "dash" "case300" "2017-01-01" 2 0 true + +2) +```julia + +using PackageCompiler +create_sysimage( + [ + "LinearAlgebra", + "Gurobi", + "L2O", + "JuMP", + "Logging", + "JuMP", + "UnitCommitment", + "ParametricOptInterface", + "DataFrames", + "CSV", + "UUIDs", + "Arrow", + "SparseArrays", + "Statistics", + ]; + sysimage_path="app/julia.so", + precompile_statements_file="app/precompile.jl" +); +``` + =# try @@ -16,7 +49,7 @@ end using Distributed, ClusterManagers np = 4 # -addprocs(SlurmManager(np), job_file_loc = ARGS[1]) +addprocs(SlurmManager(np), job_file_loc = ARGS[1], cpus_per_task=24, mem_per_cpu=12) println("We are all connected and ready.") diff --git a/examples/unitcommitment/uc_dataset_generation.jl b/examples/unitcommitment/uc_dataset_generation.jl index ef5d5d1..ebd9d0f 100644 --- a/examples/unitcommitment/uc_dataset_generation.jl +++ b/examples/unitcommitment/uc_dataset_generation.jl @@ -8,6 +8,12 @@ using Distributed # Load Functions ############## +@everywhere import Pkg + +@everywhere Pkg.activate(dirname(dirname(@__DIR__))) + +@everywhere Pkg.instantiate() + @everywhere include(joinpath(dirname(@__FILE__), "bnb_dataset.jl")) @everywhere include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl")) @@ -17,12 +23,12 @@ data_dir = joinpath(dirname(@__FILE__), "data") # joinpath(pwd(), "examples/unit ############## # Parameters ############## -case_name = "case300" -date = "2017-01-01" -save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(instance.time) -num_batches = 10 -horizon = 2 -solve_nominal = true +case_name = ARGS[3] #"case300" +date = ARGS[4] # "2017-01-01" +horizon = parse(Int, ARGS[5]) # 2 +save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) +num_batches = parse(Int, ARGS[6]) # 10 +solve_nominal = parse(Bool, ARGS[7]) #true @info "Case: $case_name, Date: $date, Horizon: $horizon" num_batches solve_nominal @@ -46,8 +52,8 @@ end # save nominal loads in a dictionary nominal_loads = Dict() -for i in 1:length(instance.bus) - bus = instance.bus[i] +for i in 1:length(instance.buses) + bus = instance.buses[i] nominal_loads[i] = bus.load[1:horizon] end From 4c329ba3feed16b5663f61f253489022f7b26408 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 19 Jan 2024 19:32:44 -0500 Subject: [PATCH 29/74] try fix --- examples/unitcommitment/slurm/slurm.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/unitcommitment/slurm/slurm.jl b/examples/unitcommitment/slurm/slurm.jl index 759a4b7..f05e464 100644 --- a/examples/unitcommitment/slurm/slurm.jl +++ b/examples/unitcommitment/slurm/slurm.jl @@ -15,6 +15,7 @@ julia --project=. 
-t1 --trace-compile=app/precompile.jl uc_dataset_generation.jl ```julia using PackageCompiler +using REPL; REPL.__init__() create_sysimage( [ "LinearAlgebra", From 0d120828c088ca4f5670fb07e15749dd7070cb9a Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Mon, 22 Jan 2024 14:41:17 -0500 Subject: [PATCH 30/74] update code --- .gitignore | 1 + examples/unitcommitment/slurm/slurm.jl | 38 ++------------------------ 2 files changed, 3 insertions(+), 36 deletions(-) diff --git a/.gitignore b/.gitignore index 913a10d..9d1e2ad 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ Manifest.toml *.out *.sbatch examples/unitcommitment/app/* +*.edu diff --git a/examples/unitcommitment/slurm/slurm.jl b/examples/unitcommitment/slurm/slurm.jl index f05e464..6384706 100644 --- a/examples/unitcommitment/slurm/slurm.jl +++ b/examples/unitcommitment/slurm/slurm.jl @@ -3,42 +3,8 @@ This code is expected to be run from an sbatch script after a module load julia command has been run. It starts the remote processes with srun within an allocation. If you get an error make sure to Pkg.checkout("CluterManagers"). - -== - -# Make sure to compile a julia image before: - -1) -julia --project=. -t1 --trace-compile=app/precompile.jl uc_dataset_generation.jl "dash" "dash" "case300" "2017-01-01" 2 0 true - -2) -```julia - -using PackageCompiler -using REPL; REPL.__init__() -create_sysimage( - [ - "LinearAlgebra", - "Gurobi", - "L2O", - "JuMP", - "Logging", - "JuMP", - "UnitCommitment", - "ParametricOptInterface", - "DataFrames", - "CSV", - "UUIDs", - "Arrow", - "SparseArrays", - "Statistics", - ]; - sysimage_path="app/julia.so", - precompile_statements_file="app/precompile.jl" -); -``` - =# + try using Distributed, ClusterManagers @@ -50,7 +16,7 @@ end using Distributed, ClusterManagers np = 4 # -addprocs(SlurmManager(np), job_file_loc = ARGS[1], cpus_per_task=24, mem_per_cpu=12) +addprocs(SlurmManager(np), job_file_loc = ARGS[1], cpus_per_task=24, mem_per_cpu=0) println("We are all connected and ready.") From 21f7a677289cda7b151b2ef95b103006c2cf6ccc Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Mon, 22 Jan 2024 15:12:51 -0500 Subject: [PATCH 31/74] add arrow compresser --- examples/unitcommitment/compress_arrow.jl | 45 +++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 examples/unitcommitment/compress_arrow.jl diff --git a/examples/unitcommitment/compress_arrow.jl b/examples/unitcommitment/compress_arrow.jl new file mode 100644 index 0000000..05072d8 --- /dev/null +++ b/examples/unitcommitment/compress_arrow.jl @@ -0,0 +1,45 @@ +using Arrow +using DataFrames + +# Data Parameters +case_name = "case300" +path_dataset = joinpath(pwd(), "examples", "unitcommitment", "data") +case_file_path = path_dataset # joinpath(path_dataset, case_name) + +# Load input and output data tables +iter_files = readdir(joinpath(case_file_path)) +file_ins = [ + joinpath(case_file_path, file) for file in iter_files if occursin("input", file) +] + +batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins] + +file_name = split(split(file_ins[1], "_input")[1], "/")[end] + +# compress output files per batch id + +for batch_id in batch_ids + + file_outs = [ + joinpath(case_file_path, file) + for file in iter_files + if occursin("output", file) && occursin(batch_id, file) + ] + if length(file_outs) == 1 + continue + end + + # Load input and output data tables + output_table = Arrow.Table(file_outs) + + # Save compressed files + Arrow.write( + joinpath(case_file_path, 
"$(file_name)_output_" * batch_id * ".arrow"), + output_table, + ) + + # Delete uncompressed files + for file in file_outs + rm(file) + end +end From 0404d1969b16a42efed0ed294a5813644d2cbe9b Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Mon, 22 Jan 2024 18:52:35 -0500 Subject: [PATCH 32/74] update --- examples/unitcommitment/bnb_dataset.jl | 5 +++-- examples/unitcommitment/compress_arrow.jl | 15 +++++++++------ examples/unitcommitment/uc_dataset_generation.jl | 4 ++++ 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl index d2bd1c1..034271e 100644 --- a/examples/unitcommitment/bnb_dataset.jl +++ b/examples/unitcommitment/bnb_dataset.jl @@ -24,8 +24,9 @@ import UnitCommitment: ShiftFactorsFormulation function uc_load_disturbances!(instance, nominal_loads; load_disturbances_range=-20:20) - for bus in instance.buses - bus.load = nominal_loads .+ rand(load_disturbances_range) + for i in 1:length(instance.buses) + bus = instance.buses[i] + bus.load = nominal_loads[i] .+ rand(load_disturbances_range) bus.load = max.(bus.load, 0.0) end end diff --git a/examples/unitcommitment/compress_arrow.jl b/examples/unitcommitment/compress_arrow.jl index 05072d8..14b9a67 100644 --- a/examples/unitcommitment/compress_arrow.jl +++ b/examples/unitcommitment/compress_arrow.jl @@ -3,11 +3,15 @@ using DataFrames # Data Parameters case_name = "case300" -path_dataset = joinpath(pwd(), "examples", "unitcommitment", "data") +date = "2017-01-01" +horizon = 2 +path_dataset = joinpath( + dirname(@__FILE__), "data", case_name, date, "h" * string(horizon) +) case_file_path = path_dataset # joinpath(path_dataset, case_name) # Load input and output data tables -iter_files = readdir(joinpath(case_file_path)) +iter_files = readdir(joinpath(case_file_path, "input")) file_ins = [ joinpath(case_file_path, file) for file in iter_files if occursin("input", file) ] @@ -17,13 +21,12 @@ batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins] file_name = split(split(file_ins[1], "_input")[1], "/")[end] # compress output files per batch id - +iter_files = readdir(joinpath(case_file_path)) for batch_id in batch_ids - file_outs = [ joinpath(case_file_path, file) for file in iter_files - if occursin("output", file) && occursin(batch_id, file) + if occursin("output", file) && occursin(batch_id, file) && occursin("arrow", file) ] if length(file_outs) == 1 continue @@ -34,7 +37,7 @@ for batch_id in batch_ids # Save compressed files Arrow.write( - joinpath(case_file_path, "$(file_name)_output_" * batch_id * ".arrow"), + joinpath(case_file_path, "output", "$(file_name)_output_" * batch_id * ".arrow"), output_table, ) diff --git a/examples/unitcommitment/uc_dataset_generation.jl b/examples/unitcommitment/uc_dataset_generation.jl index ebd9d0f..03211c8 100644 --- a/examples/unitcommitment/uc_dataset_generation.jl +++ b/examples/unitcommitment/uc_dataset_generation.jl @@ -29,6 +29,9 @@ horizon = parse(Int, ARGS[5]) # 2 save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) num_batches = parse(Int, ARGS[6]) # 10 solve_nominal = parse(Bool, ARGS[7]) #true +data_dir = joinpath(data_dir, case_name, date, "h" * string(horizon)) +mkpath(joinpath(data_dir, "input")) +mkpath(joinpath(data_dir, "output")) @info "Case: $case_name, Date: $date, Horizon: $horizon" num_batches solve_nominal @@ -64,3 +67,4 @@ end uc_bnb_dataset(instance, save_file; data_dir=data_dir, model=model) uc_random_dataset!(instance, 
save_file; data_dir=data_dir, model=model) end + From 64fbd1b77fc5586c38b638cfb1c022c48f679782 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Mon, 22 Jan 2024 18:53:06 -0500 Subject: [PATCH 33/74] update --- examples/unitcommitment/visualize.jl | 64 ++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 examples/unitcommitment/visualize.jl diff --git a/examples/unitcommitment/visualize.jl b/examples/unitcommitment/visualize.jl new file mode 100644 index 0000000..a5abe08 --- /dev/null +++ b/examples/unitcommitment/visualize.jl @@ -0,0 +1,64 @@ +using Plots +using Arrow +using DataFrames + +# Data Parameters +case_name = "case300" +date = "2017-01-01" +horizon = "2" +path_dataset = joinpath(pwd(), "examples", "unitcommitment", "data") +case_file_path = joinpath(path_dataset, case_name, date, "h"*horizon) + +# Load input and output data tables +file_ins = readdir(joinpath(case_file_path, "input"), join=true) +file_outs = readdir(joinpath(case_file_path, "output"), join=true) + +# Load input and output data tables +input_tables = Array{DataFrame}(undef, length(file_ins)) +output_tables = Array{DataFrame}(undef, length(file_outs)) +for (i, file) in enumerate(file_ins) + input_tables[i] = Arrow.Table(file) |> DataFrame +end +for (i, file) in enumerate(file_outs) + output_tables[i] = Arrow.Table(file) |> DataFrame + # if all the status are OPTIMAL, make them INFEASIBLE + if all(output_tables[i].status .== "OPTIMAL") + output_tables[i].status .= "INFEASIBLE" + end +end + +# concatenate all the input and output tables +input_table = vcat(input_tables...) +output_table = vcat(output_tables...) + +# sum each row of the input table (excluding the id) and store it in a new column +commit_columns = [col for col in names(input_table) if !occursin("load", col) && !occursin("id", col)] +load_columns = [col for col in names(input_table) if occursin("load", col)] +input_table.total_commitment = sum(eachcol(input_table[!, commit_columns])) +input_table.total_load = sum(eachcol(input_table[!, load_columns])) + +# join input and output tables keep on id and keep only total_commitment and objective +table_train = innerjoin(input_table, output_table[!, [:id, :objective]]; on=:id)[!, [:id, :total_commitment, :objective]] +# separate bnb into entries that have total commitment different from integer and those that have +# total commitment equal to integer +table_train_bnb_not_integer = table_train_bnb[table_train_bnb.total_commitment .!= floor.(table_train_bnb.total_commitment), :] +table_train_bnb_integer = table_train_bnb[table_train_bnb.total_commitment .== floor.(table_train_bnb.total_commitment), :] + +# plot the data +# now with the y axis on the log scale +plt = scatter( + table_train_random.total_commitment, + table_train_random.objective, + label="Random", xlabel="Total Commitment", ylabel="Objective", + title="", color=:red, yscale=:log10, legend=:outertopright, + alpha=0.2 +); +scatter!(plt, + table_train_bnb_not_integer.total_commitment, + table_train_bnb_not_integer.objective, + label="Branch and Bound Node", color=:blue, + marker=:x +); +scatter!(plt, table_train_bnb_integer.total_commitment, table_train_bnb_integer.objective, label="Branch and Bound Leaf", color=:yellow, + alpha=0.2 +) \ No newline at end of file From c88ff124a0c5f3099b37d5ef3502c276b094ea90 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 25 Jan 2024 13:35:48 -0500 Subject: [PATCH 34/74] update --- examples/unitcommitment/bnb_dataset.jl | 125 ++++++++++-------- 
.../unitcommitment/uc_dataset_generation.jl | 31 +++-- 2 files changed, 93 insertions(+), 63 deletions(-) diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl index 034271e..4cde29f 100644 --- a/examples/unitcommitment/bnb_dataset.jl +++ b/examples/unitcommitment/bnb_dataset.jl @@ -16,6 +16,7 @@ using UUIDs using Arrow using SparseArrays using Statistics +using Random import UnitCommitment: Formulation, @@ -23,10 +24,10 @@ import UnitCommitment: MorLatRam2013, ShiftFactorsFormulation -function uc_load_disturbances!(instance, nominal_loads; load_disturbances_range=-20:20) +function uc_load_disturbances!(rng, instance, nominal_loads; load_disturbances_range=-20:20) for i in 1:length(instance.buses) bus = instance.buses[i] - bus.load = nominal_loads[i] .+ rand(load_disturbances_range) + bus.load = nominal_loads[i] .+ rand(rng, load_disturbances_range) bus.load = max.(bus.load, 0.0) end end @@ -76,6 +77,64 @@ function bin_variables_retriever(model) return bin_vars, bin_vars_names end +mutable struct StorageCallback <: Function + model + bin_vars + my_storage_vars + my_storage_obj + is_relaxed +end +function StorageCallback(model, bin_vars; maxiter=999999) + return StorageCallback( + model, + bin_vars, + [], + [], + [], + ) +end + +function (callback::StorageCallback)(cb_data, cb_where::Cint) + # You can select where the callback is run + if cb_where == GRB_CB_MIPNODE + # prep + obj_terms = objective_function(callback.model).terms + obj_terms_gurobi = [obj_terms[var] for var in all_variables(callback.model) if haskey(obj_terms, var)] + num_all_var = num_variables(callback.model) + # save + resultobj = Ref{Cint}() + GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_STATUS, resultobj) + if resultobj[] != GRB_OPTIMAL + return # Solution is something other than optimal. 
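+ # (nodes whose relaxation status is not GRB_OPTIMAL, e.g. infeasible or cut-off nodes, are simply skipped and never recorded)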
+ end + gurobi_indexes_all = [Gurobi.column(backend(callback.model).optimizer.model, callback.model.moi_backend.model_to_optimizer_map[var.index]) for var in all_variables(callback.model) if haskey(obj_terms, var)] + gurobi_indexes_bin = [Gurobi.column(backend(callback.model).optimizer.model, callback.model.moi_backend.model_to_optimizer_map[callback.bin_vars[i].index]) for i in 1:length(callback.bin_vars)] + + resultP = Vector{Cdouble}(undef, num_all_var) + GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_REL, resultP) + push!(callback.my_storage_vars, resultP[gurobi_indexes_bin]) + # Get the objective value + push!(callback.my_storage_obj, dot(obj_terms_gurobi, resultP[gurobi_indexes_all])) + # mark as relaxed + push!(callback.is_relaxed, 1) + return + end + if cb_where == GRB_CB_MIPSOL + # Before querying `callback_value`, you must call: + Gurobi.load_callback_variable_primal(cb_data, cb_where) + # Get the values of the variables + x = [callback_value(cb_data, var) for var in callback.bin_vars] + push!(callback.my_storage_vars, x) + # Get the objective value + obj = Ref{Cdouble}() + GRBcbget(cb_data, cb_where, GRB_CB_MIPSOL_OBJ, obj) + push!(callback.my_storage_obj, obj[]) + # mark as not relaxed + push!(callback.is_relaxed, 0) + return + end +end + """ Build branch and bound dataset """ @@ -89,66 +148,28 @@ function uc_bnb_dataset(instance, save_file; model=build_model_uc(instance), dat @assert all([is_binary(var) for var in bin_vars]) @assert length(bin_vars) == length(all_binary_variables(model)) - obj_terms = objective_function(model).terms - obj_terms_gurobi = [obj_terms[var] for var in all_variables(model) if haskey(obj_terms, var)] - num_bin_var = length(bin_vars) - num_all_var = num_variables(model) - - global my_storage_vars = [] - global my_storage_obj = [] - global is_relaxed = [] - global non_optimals = [] - function my_callback_function(cb_data, cb_where::Cint) - # You can select where the callback is run - if cb_where == GRB_CB_MIPNODE - resultobj = Ref{Cint}() - GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_STATUS, resultobj) - if resultobj[] != GRB_OPTIMAL - push!(non_optimals, resultobj[]) - return # Solution is something other than optimal. 
- end - gurobi_indexes_all = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[var.index]) for var in all_variables(model) if haskey(obj_terms, var)] - gurobi_indexes_bin = [Gurobi.column(backend(model).optimizer.model, model.moi_backend.model_to_optimizer_map[bin_vars[i].index]) for i in 1:length(bin_vars)] - resultP = Vector{Cdouble}(undef, num_all_var) - GRBcbget(cb_data, cb_where, GRB_CB_MIPNODE_REL, resultP) - push!(my_storage_vars, resultP[gurobi_indexes_bin]) - # Get the objective value - push!(my_storage_obj, dot(obj_terms_gurobi, resultP[gurobi_indexes_all])) - # mark as relaxed - push!(is_relaxed, 1) - return - end - if cb_where == GRB_CB_MIPSOL - # Before querying `callback_value`, you must call: - Gurobi.load_callback_variable_primal(cb_data, cb_where) - # Get the values of the variables - x = [callback_value(cb_data, var) for var in bin_vars] - # push - push!(my_storage_vars, x) - # Get the objective value - obj = Ref{Cdouble}() - GRBcbget(cb_data, cb_where, GRB_CB_MIPSOL_OBJ, obj) - # push - push!(my_storage_obj, obj[]) - # mark as not relaxed - push!(is_relaxed, 0) - return - end - return - end + # Callback + my_callback_function = StorageCallback(model, bin_vars) + # Set callback MOI.set(model, Gurobi.CallbackFunction(), my_callback_function) # JuMP.optimize!(model) UnitCommitment.optimize!(model) + @info "status: " termination_status(model) # push optimal solution x = [value(var) for var in bin_vars] - push!(my_storage_vars, x) + push!(my_callback_function.my_storage_vars, x) optimal_obj = objective_value(model) - push!(my_storage_obj, optimal_obj) + push!(my_callback_function.my_storage_obj, optimal_obj) # mark as not relaxed - push!(is_relaxed, 0) + push!(my_callback_function.is_relaxed, 0) + + @info "Solved nominal instance" optimal_obj length(my_callback_function.my_storage_vars) + # post process is_relaxed = findall(x -> x == 1, is_relaxed) + my_storage_vars = my_callback_function.my_storage_vars + my_storage_obj = my_callback_function.my_storage_obj # Data X = hcat(my_storage_vars...)'[:,:] diff --git a/examples/unitcommitment/uc_dataset_generation.jl b/examples/unitcommitment/uc_dataset_generation.jl index 03211c8..91a2840 100644 --- a/examples/unitcommitment/uc_dataset_generation.jl +++ b/examples/unitcommitment/uc_dataset_generation.jl @@ -3,6 +3,7 @@ ################################################################ using Distributed +using Random ############## # Load Functions @@ -23,12 +24,12 @@ data_dir = joinpath(dirname(@__FILE__), "data") # joinpath(pwd(), "examples/unit ############## # Parameters ############## -case_name = ARGS[3] #"case300" -date = ARGS[4] # "2017-01-01" -horizon = parse(Int, ARGS[5]) # 2 +case_name = ARGS[3] # case_name = "case300" +date = ARGS[4] # date="2017-01-01" +horizon = parse(Int, ARGS[5]) # horizon=2 save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) -num_batches = parse(Int, ARGS[6]) # 10 -solve_nominal = parse(Bool, ARGS[7]) #true +num_batches = parse(Int, ARGS[6]) # num_batches=10 +solve_nominal = parse(Bool, ARGS[7]) # solve_nominal=true data_dir = joinpath(data_dir, case_name, date, "h" * string(horizon)) mkpath(joinpath(data_dir, "input")) mkpath(joinpath(data_dir, "output")) @@ -60,11 +61,19 @@ for i in 1:length(instance.buses) nominal_loads[i] = bus.load[1:horizon] end -@distributed for _ in 1:num_batches - # perturb loads - uc_load_disturbances!(instance, nominal_loads) - model = build_model_uc(instance) - uc_bnb_dataset(instance, save_file; 
data_dir=data_dir, model=model) - uc_random_dataset!(instance, save_file; data_dir=data_dir, model=model) +@distributed for i in 1:num_batches + rng = MersenneTwister(round(Int, i * time())) + instance_ = deepcopy(instance) + uc_load_disturbances!(rng, instance_, nominal_loads) + # perturbed loads + perturbed_loads_sum = 0.0 + for i in 1:length(instance_.buses) + bus = instance_.buses[i] + perturbed_loads_sum += sum(bus.load) + end + @info "Solving batch $i" rng perturbed_loads_sum + model = build_model_uc(instance_) + uc_bnb_dataset(instance_, save_file; data_dir=data_dir, model=model) + uc_random_dataset!(instance_, save_file; data_dir=data_dir, model=model) end From 7fa791c65b6fc2fcc7295ff6d3b8b5a70492ace6 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 25 Jan 2024 17:02:43 -0500 Subject: [PATCH 35/74] fix --- examples/unitcommitment/bnb_dataset.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl index 4cde29f..4014c5f 100644 --- a/examples/unitcommitment/bnb_dataset.jl +++ b/examples/unitcommitment/bnb_dataset.jl @@ -167,7 +167,7 @@ function uc_bnb_dataset(instance, save_file; model=build_model_uc(instance), dat @info "Solved nominal instance" optimal_obj length(my_callback_function.my_storage_vars) # post process - is_relaxed = findall(x -> x == 1, is_relaxed) + is_relaxed = findall(x -> x == 1, my_callback_function.is_relaxed) my_storage_vars = my_callback_function.my_storage_vars my_storage_obj = my_callback_function.my_storage_obj @@ -203,6 +203,7 @@ function uc_bnb_dataset(instance, save_file; model=build_model_uc(instance), dat # Save if filetype === ArrowFile Arrow.write(joinpath(data_dir, save_file * "_input_" * string(batch_id) * ".arrow"), df_in) + println(df_out) Arrow.write(joinpath(data_dir, save_file * "_output_" * string(batch_id) * ".arrow"), df_out) else CSV.write(joinpath(data_dir, save_file * "_input_" * string(batch_id) * ".csv"), df_in) From 23b569836695e56b7466a4f989e1445ed56111b9 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 25 Jan 2024 17:02:51 -0500 Subject: [PATCH 36/74] fix --- examples/unitcommitment/slurm/slurm.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/unitcommitment/slurm/slurm.jl b/examples/unitcommitment/slurm/slurm.jl index 6384706..436d2a2 100644 --- a/examples/unitcommitment/slurm/slurm.jl +++ b/examples/unitcommitment/slurm/slurm.jl @@ -15,8 +15,8 @@ end using Distributed, ClusterManagers -np = 4 # -addprocs(SlurmManager(np), job_file_loc = ARGS[1], cpus_per_task=24, mem_per_cpu=0) +np = 1 # +addprocs(SlurmManager(np), job_file_loc = ARGS[1], cpus_per_task=24, mem_per_cpu=0, partition="debug", t="08:00:00") println("We are all connected and ready.") From 30d33f4ee00aba5428c67ded89b908f3131f93d4 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 25 Jan 2024 20:02:33 -0500 Subject: [PATCH 37/74] working code --- examples/unitcommitment/bnb_dataset.jl | 2 -- examples/unitcommitment/compress_arrow.jl | 12 +++++++++++- examples/unitcommitment/slurm/slurm.jl | 10 ++-------- examples/unitcommitment/uc_dataset_generation.jl | 3 ++- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl index 4014c5f..14b43a2 100644 --- a/examples/unitcommitment/bnb_dataset.jl +++ b/examples/unitcommitment/bnb_dataset.jl @@ -203,7 +203,6 @@ function uc_bnb_dataset(instance, 
save_file; model=build_model_uc(instance), dat # Save if filetype === ArrowFile Arrow.write(joinpath(data_dir, save_file * "_input_" * string(batch_id) * ".arrow"), df_in) - println(df_out) Arrow.write(joinpath(data_dir, save_file * "_output_" * string(batch_id) * ".arrow"), df_out) else CSV.write(joinpath(data_dir, save_file * "_input_" * string(batch_id) * ".csv"), df_in) @@ -213,7 +212,6 @@ function uc_bnb_dataset(instance, save_file; model=build_model_uc(instance), dat @info "Saved dataset to $(data_dir)" batch_id length(instances_ids) length(is_relaxed) optimal_obj return - end """ diff --git a/examples/unitcommitment/compress_arrow.jl b/examples/unitcommitment/compress_arrow.jl index 14b9a67..132f838 100644 --- a/examples/unitcommitment/compress_arrow.jl +++ b/examples/unitcommitment/compress_arrow.jl @@ -11,15 +11,25 @@ path_dataset = joinpath( case_file_path = path_dataset # joinpath(path_dataset, case_name) # Load input and output data tables -iter_files = readdir(joinpath(case_file_path, "input")) +iter_files = readdir(case_file_path) +iter_files = [ + joinpath(case_file_path, file) for file in iter_files if occursin(case_name, file) +] file_ins = [ joinpath(case_file_path, file) for file in iter_files if occursin("input", file) ] batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins] +@info "Compressing files in $(case_file_path)" batch_ids + file_name = split(split(file_ins[1], "_input")[1], "/")[end] +# move input files to input folder +for file in file_ins + mv(file, joinpath(case_file_path, "input")) +end + # compress output files per batch id iter_files = readdir(joinpath(case_file_path)) for batch_id in batch_ids diff --git a/examples/unitcommitment/slurm/slurm.jl b/examples/unitcommitment/slurm/slurm.jl index 436d2a2..1d89cdc 100644 --- a/examples/unitcommitment/slurm/slurm.jl +++ b/examples/unitcommitment/slurm/slurm.jl @@ -15,15 +15,9 @@ end using Distributed, ClusterManagers -np = 1 # -addprocs(SlurmManager(np), job_file_loc = ARGS[1], cpus_per_task=24, mem_per_cpu=0, partition="debug", t="08:00:00") +np = 4 # +addprocs(SlurmManager(np), job_file_loc = ARGS[1]) #cpus_per_task=24, mem_per_cpu=24, partition="debug", t="08:00:00") println("We are all connected and ready.") include(ARGS[2]) - -# The Slurm resource allocation is released when all the workers have -# exited -for i in workers() - rmprocs(i) -end \ No newline at end of file diff --git a/examples/unitcommitment/uc_dataset_generation.jl b/examples/unitcommitment/uc_dataset_generation.jl index 91a2840..98f24eb 100644 --- a/examples/unitcommitment/uc_dataset_generation.jl +++ b/examples/unitcommitment/uc_dataset_generation.jl @@ -61,7 +61,7 @@ for i in 1:length(instance.buses) nominal_loads[i] = bus.load[1:horizon] end -@distributed for i in 1:num_batches +@sync @distributed for i in 1:num_batches rng = MersenneTwister(round(Int, i * time())) instance_ = deepcopy(instance) uc_load_disturbances!(rng, instance_, nominal_loads) @@ -77,3 +77,4 @@ end uc_random_dataset!(instance_, save_file; data_dir=data_dir, model=model) end +include(joinpath(dirname(@__FILE__), "compress_arrow.jl")) \ No newline at end of file From 85123271d2a829250a3ba248f67d0f047e1f9cf9 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 25 Jan 2024 22:30:12 -0500 Subject: [PATCH 38/74] update --- examples/unitcommitment/compress_arrow.jl | 6 ++++-- examples/unitcommitment/visualize.jl | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/examples/unitcommitment/compress_arrow.jl 
b/examples/unitcommitment/compress_arrow.jl index 132f838..6e5fe71 100644 --- a/examples/unitcommitment/compress_arrow.jl +++ b/examples/unitcommitment/compress_arrow.jl @@ -26,8 +26,10 @@ batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins] file_name = split(split(file_ins[1], "_input")[1], "/")[end] # move input files to input folder -for file in file_ins - mv(file, joinpath(case_file_path, "input")) +for file in iter_files + if occursin("input", file) + mv(joinpath(case_file_path, file), joinpath(case_file_path, "input", file)) + end end # compress output files per batch id diff --git a/examples/unitcommitment/visualize.jl b/examples/unitcommitment/visualize.jl index a5abe08..c1e7d32 100644 --- a/examples/unitcommitment/visualize.jl +++ b/examples/unitcommitment/visualize.jl @@ -6,7 +6,7 @@ using DataFrames case_name = "case300" date = "2017-01-01" horizon = "2" -path_dataset = joinpath(pwd(), "examples", "unitcommitment", "data") +path_dataset = joinpath(dirname(@__FILE__), "data") case_file_path = joinpath(path_dataset, case_name, date, "h"*horizon) # Load input and output data tables From 9d2e998cc5a3a0584fcfc623282aadf86fc87478 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Thu, 25 Jan 2024 22:46:07 -0500 Subject: [PATCH 39/74] update --- examples/unitcommitment/compress_arrow.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/unitcommitment/compress_arrow.jl b/examples/unitcommitment/compress_arrow.jl index 6e5fe71..1f9b4c0 100644 --- a/examples/unitcommitment/compress_arrow.jl +++ b/examples/unitcommitment/compress_arrow.jl @@ -28,7 +28,7 @@ file_name = split(split(file_ins[1], "_input")[1], "/")[end] # move input files to input folder for file in iter_files if occursin("input", file) - mv(joinpath(case_file_path, file), joinpath(case_file_path, "input", file)) + mv(joinpath(case_file_path, file), joinpath(case_file_path, "input", file), force=true) end end From cb69b81402ad648a5eabf2228fa454ebee5425e2 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 01:29:23 -0500 Subject: [PATCH 40/74] update --- examples/unitcommitment/compress_arrow.jl | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/examples/unitcommitment/compress_arrow.jl b/examples/unitcommitment/compress_arrow.jl index 1f9b4c0..6707802 100644 --- a/examples/unitcommitment/compress_arrow.jl +++ b/examples/unitcommitment/compress_arrow.jl @@ -5,16 +5,17 @@ using DataFrames case_name = "case300" date = "2017-01-01" horizon = 2 -path_dataset = joinpath( +case_file_path = joinpath( dirname(@__FILE__), "data", case_name, date, "h" * string(horizon) ) -case_file_path = path_dataset # joinpath(path_dataset, case_name) # Load input and output data tables iter_files = readdir(case_file_path) + iter_files = [ - joinpath(case_file_path, file) for file in iter_files if occursin(case_name, file) + file for file in iter_files if occursin(case_name, file) && occursin("arrow", file) ] + file_ins = [ joinpath(case_file_path, file) for file in iter_files if occursin("input", file) ] @@ -33,15 +34,17 @@ for file in iter_files end # compress output files per batch id -iter_files = readdir(joinpath(case_file_path)) for batch_id in batch_ids - file_outs = [ - joinpath(case_file_path, file) + file_outs_names = [ + file for file in iter_files - if occursin("output", file) && occursin(batch_id, file) && occursin("arrow", file) + if occursin("output", file) && occursin(batch_id, file) ] - if length(file_outs) 
== 1 - continue + + file_outs = [ joinpath(case_file_path, file) for file in file_outs_names ] + + if length(file_outs_names) == 1 + mv(file_outs[1], joinpath(case_file_path, "output", file_outs_names[1]), force=true) end # Load input and output data tables From 8daaa18f29b488a483e46f7bc29298428c36f58e Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 11:11:21 -0500 Subject: [PATCH 41/74] fix error --- examples/unitcommitment/bnb_dataset.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl index 14b43a2..fbc55ae 100644 --- a/examples/unitcommitment/bnb_dataset.jl +++ b/examples/unitcommitment/bnb_dataset.jl @@ -252,6 +252,8 @@ function uc_random_dataset!(instance, save_file; model=build_model_uc(instance), else CSV.read(joinpath(data_dir, input_file), DataFrame) end + # delete old file + rm(joinpath(data_dir, input_file)) for bus in instance.buses for t in 1:instance.time df_in[!, Symbol("load_" * string(bus.name) * "_" * string(t))] = fill(bus.load[t], length(df_in.id)) From f92ad0a8109b11a0a30898c68e520eedeae64458 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 11:11:42 -0500 Subject: [PATCH 42/74] add comments --- examples/unitcommitment/bnb_dataset.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/unitcommitment/bnb_dataset.jl b/examples/unitcommitment/bnb_dataset.jl index fbc55ae..65e2693 100644 --- a/examples/unitcommitment/bnb_dataset.jl +++ b/examples/unitcommitment/bnb_dataset.jl @@ -254,11 +254,13 @@ function uc_random_dataset!(instance, save_file; model=build_model_uc(instance), end # delete old file rm(joinpath(data_dir, input_file)) + # add loads for bus in instance.buses for t in 1:instance.time df_in[!, Symbol("load_" * string(bus.name) * "_" * string(t))] = fill(bus.load[t], length(df_in.id)) end end + # save if filetype === ArrowFile Arrow.write(joinpath(data_dir, input_file), df_in) else From e90172d16210b003ca2853aec9d07c3c709c83ec Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 11:33:59 -0500 Subject: [PATCH 43/74] fix typo --- examples/unitcommitment/compress_arrow.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/unitcommitment/compress_arrow.jl b/examples/unitcommitment/compress_arrow.jl index 6707802..3bde2ca 100644 --- a/examples/unitcommitment/compress_arrow.jl +++ b/examples/unitcommitment/compress_arrow.jl @@ -45,6 +45,7 @@ for batch_id in batch_ids if length(file_outs_names) == 1 mv(file_outs[1], joinpath(case_file_path, "output", file_outs_names[1]), force=true) + continue end # Load input and output data tables From 21254aeaf7d9310d1daf841aaeb5b4bfbf373cf0 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Fri, 26 Jan 2024 12:09:07 -0500 Subject: [PATCH 44/74] update visuals --- examples/unitcommitment/visualize.jl | 63 ++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/examples/unitcommitment/visualize.jl b/examples/unitcommitment/visualize.jl index c1e7d32..9d75bb0 100644 --- a/examples/unitcommitment/visualize.jl +++ b/examples/unitcommitment/visualize.jl @@ -38,27 +38,56 @@ input_table.total_commitment = sum(eachcol(input_table[!, commit_columns])) input_table.total_load = sum(eachcol(input_table[!, load_columns])) # join input and output tables keep on id and keep only total_commitment and objective -table_train = innerjoin(input_table, output_table[!, [:id, :objective]]; on=:id)[!, [:id, 
:total_commitment, :objective]] -# separate bnb into entries that have total commitment different from integer and those that have -# total commitment equal to integer -table_train_bnb_not_integer = table_train_bnb[table_train_bnb.total_commitment .!= floor.(table_train_bnb.total_commitment), :] -table_train_bnb_integer = table_train_bnb[table_train_bnb.total_commitment .== floor.(table_train_bnb.total_commitment), :] +table_train = innerjoin(input_table, output_table[!, [:id, :objective, :status]]; on=:id)[!, [:id, :total_commitment, :objective, :status, :total_load]] -# plot the data +# separate infeasible from feasible +table_train_optimal = table_train[table_train.status .== "OPTIMAL", :] +table_train_infeasible = table_train[table_train.status .== "INFEASIBLE", :] +table_train_localopt = table_train[table_train.status .== "LOCALLY_SOLVED", :] + +# plot objective vs total commitment # now with the y axis on the log scale +plotly() plt = scatter( - table_train_random.total_commitment, - table_train_random.objective, - label="Random", xlabel="Total Commitment", ylabel="Objective", + table_train_optimal.total_commitment, + table_train_optimal.objective, + label="Optimal", xlabel="Total Commitment", ylabel="Objective", title="", color=:red, yscale=:log10, legend=:outertopright, - alpha=0.2 + # marker size + ms=10, +); +scatter!(plt, + table_train_localopt.total_commitment, + table_train_localopt.objective, + label="Local Optimum", color=:blue, + marker=:o, alpha=0.5 ); scatter!(plt, - table_train_bnb_not_integer.total_commitment, - table_train_bnb_not_integer.objective, - label="Branch and Bound Node", color=:blue, - marker=:x + table_train_infeasible.total_commitment, + table_train_infeasible.objective, + label="Infeasible", color=:yellow, + marker=:square, alpha=0.01, ms=2 +) + +# plot objective vs total load +# now with the y axis on the log scale +plt2 = scatter( + table_train_optimal.total_load, + table_train_optimal.objective, + label="Optimal", xlabel="Total Load", ylabel="Objective", + title="", color=:red, yscale=:log10, legend=:outertopright, + # marker size + ms=10, +); +scatter!(plt2, + table_train_localopt.total_load, + table_train_localopt.objective, + label="Local Optimum", color=:blue, + marker=:o, alpha=0.5 ); -scatter!(plt, table_train_bnb_integer.total_commitment, table_train_bnb_integer.objective, label="Branch and Bound Leaf", color=:yellow, - alpha=0.2 -) \ No newline at end of file +scatter!(plt2, + table_train_infeasible.total_load, + table_train_infeasible.objective, + label="Infeasible", color=:yellow, + marker=:square, alpha=0.5 +) From 373eac9c8fb1e7d228a16fb7c2a5086b6af5b71a Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 15:01:58 -0500 Subject: [PATCH 45/74] update --- .../unitcommitment/uc_dataset_generation.jl | 2 +- examples/unitcommitment/uc_nn_training.jl | 37 +++++++++++++++++++ examples/unitcommitment/unitcommitment.jl | 30 ++++++++------- 3 files changed, 55 insertions(+), 14 deletions(-) create mode 100644 examples/unitcommitment/uc_nn_training.jl diff --git a/examples/unitcommitment/uc_dataset_generation.jl b/examples/unitcommitment/uc_dataset_generation.jl index 98f24eb..b18ab37 100644 --- a/examples/unitcommitment/uc_dataset_generation.jl +++ b/examples/unitcommitment/uc_dataset_generation.jl @@ -19,7 +19,7 @@ using Random @everywhere include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl")) -data_dir = joinpath(dirname(@__FILE__), "data") # joinpath(pwd(), "examples/unitcommitment", "data") 
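
The replacement below only drops the stale pwd()-based alternative from the comment, but the distinction matters: @__FILE__ resolves from the script's own location, while pwd() depends on where Julia was launched. A small sketch of the two strategies (paths are illustrative):

# Stable: resolves next to this script regardless of the caller's directory.
script_dir = dirname(@__FILE__)
data_dir_stable = joinpath(script_dir, "data")

# Fragile: breaks unless the script is launched from the repository root.
data_dir_fragile = joinpath(pwd(), "examples", "unitcommitment", "data")
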
+data_dir = joinpath(dirname(@__FILE__), "data") ############## # Parameters diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl new file mode 100644 index 0000000..678beeb --- /dev/null +++ b/examples/unitcommitment/uc_nn_training.jl @@ -0,0 +1,37 @@ +################################################## +######### Unit Commitment Proxy Training ######### +################################################## + +############## +# Load Functions +############## + +import Pkg; Pkg.activate(dirname(dirname(@__DIR__))); Pkg.instantiate() + +using L2O +using MLJFlux +using CUDA +using Flux +using MLJ + +include(joinpath(dirname(@__FILE__), "bnb_dataset.jl")) + +include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl")) + +data_dir = joinpath(dirname(@__FILE__), "data") + +############## +# Parameters +############## +filetype=ArrowFile +case_name = ARGS[3] # case_name = "case300" +date = ARGS[4] # date="2017-01-01" +horizon = parse(Int, ARGS[5]) # horizon=2 +save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) +data_dir = joinpath(data_dir, case_name, date, "h" * string(horizon)) + +############## +# Fit DNN approximator +############## + +# Read input and output data \ No newline at end of file diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl index ba89d3f..0807db1 100644 --- a/examples/unitcommitment/unitcommitment.jl +++ b/examples/unitcommitment/unitcommitment.jl @@ -6,30 +6,34 @@ # Load Functions ############## -include("examples/unitcommitment/bnb_dataset.jl") +import Pkg; Pkg.activate(dirname(dirname(@__DIR__))); Pkg.instantiate() -include("src/cutting_planes.jl") +using MLJFlux +using CUDA +using Flux +using MLJ + +include(joinpath(dirname(@__FILE__), "bnb_dataset.jl")) -data_dir = joinpath(pwd(), "examples/unitcommitment", "data") # joinpath(dirname(@__FILE__), "data") +include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl")) + +data_dir = joinpath(dirname(@__FILE__), "data") ############## -# Load Instance +# Parameters ############## -case_name = "case300" -date = "2017-01-01" -save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(instance.time) +filetype=ArrowFile +case_name = ARGS[3] # case_name = "case300" +date = ARGS[4] # date="2017-01-01" +horizon = parse(Int, ARGS[5]) # horizon=2 +save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) +data_dir = joinpath(data_dir, case_name, date, "h" * string(horizon)) ############## # Fit DNN approximator ############## -using MLJFlux -using CUDA -using Flux -using MLJ # Read input and output data -input_data = CSV.read(joinpath(data_dir, input_file), DataFrame) -output_data = CSV.read(joinpath(data_dir, output_file), DataFrame) # Separate input and output variables output_variables = output_data[!, :objective] From 6124bf211600ad9334932ef0810ae56db36fb6d5 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 16:02:25 -0500 Subject: [PATCH 46/74] update code --- LocalPreferences.toml | 3 + Project.toml | 21 +++--- examples/unitcommitment/uc_nn_training.jl | 88 ++++++++++++++++++++++- 3 files changed, 100 insertions(+), 12 deletions(-) create mode 100644 LocalPreferences.toml diff --git a/LocalPreferences.toml b/LocalPreferences.toml new file mode 100644 index 0000000..1258832 --- /dev/null +++ b/LocalPreferences.toml @@ -0,0 +1,3 @@ +[CUDA_Runtime_jll] +__clear__ = ["local"] +version = "12.1" diff --git a/Project.toml 
b/Project.toml index 6aaa13f..f18dc7d 100644 --- a/Project.toml +++ b/Project.toml @@ -1,8 +1,17 @@ +authors = ["andrewrosemberg and contributors"] name = "L2O" uuid = "e1d8bfa7-c465-446a-84b9-451470f6e76c" -authors = ["andrewrosemberg and contributors"] version = "1.2.0-DEV" +[compat] +Arrow = "2" +CSV = "0.10" +Dualization = "0.5" +JuMP = "1" +ParametricOptInterface = "0.7" +Zygote = "^0.6.68" +julia = "1.6" + [deps] Arrow = "69666777-d1a9-59fb-9406-91d4454c9d45" CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" @@ -19,17 +28,9 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" -[compat] -Arrow = "2" -CSV = "0.10" -Dualization = "0.5" -JuMP = "1" -ParametricOptInterface = "0.7" -Zygote ="^0.6.68" -julia = "1.6" - [extras] AbstractGPs = "99985d1d-32ba-4be9-9821-2ec096f28918" +CUDA_Runtime_jll = "76a88914-d11a-5bdc-97e0-2f5a05c973a2" Clarabel = "61c947e1-3e6d-4ee4-985a-eec8c727bd6e" DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 678beeb..72500f4 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -10,9 +10,10 @@ import Pkg; Pkg.activate(dirname(dirname(@__DIR__))); Pkg.instantiate() using L2O using MLJFlux -using CUDA +using CUDA # if error run CUDA.set_runtime_version!(v"12.1.0") using Flux using MLJ +using DataFrames include(joinpath(dirname(@__FILE__), "bnb_dataset.jl")) @@ -34,4 +35,87 @@ data_dir = joinpath(data_dir, case_name, date, "h" * string(horizon)) # Fit DNN approximator ############## -# Read input and output data \ No newline at end of file +# Read input and output data +iter_input = readdir(joinpath(data_dir, "input"), join=true) +iter_output = readdir(joinpath(data_dir, "output"), join=true) +# filter for only arrow files of this case +iter_input = [file for file in iter_input if occursin(case_name, file) && occursin("arrow", file)] +iter_output = [file for file in iter_output if occursin(case_name, file) && occursin("arrow", file)] + +# Load input and output data tables +input_tables = Array{DataFrame}(undef, length(iter_input)) +output_tables = Array{DataFrame}(undef, length(iter_output)) +for (i, file) in enumerate(iter_input) + input_tables[i] = Arrow.Table(file) |> DataFrame +end +for (i, file) in enumerate(iter_output) + output_tables[i] = Arrow.Table(file) |> DataFrame +end + +# concatenate all the input and output tables +input_table = vcat(input_tables...) +output_table = vcat(output_tables...) 
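
The loops above materialize each Arrow batch into a DataFrame before concatenation. In isolation, the read/write pattern is just the following toy roundtrip (the file name is illustrative):

using Arrow, DataFrames

df_demo = DataFrame(id=1:3, objective=rand(3))
Arrow.write("batch_demo.arrow", df_demo)               # one batch of rows
df_back = Arrow.Table("batch_demo.arrow") |> DataFrame
@assert df_back.id == df_demo.id
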
+ +# Separate input and output variables & ignore id time status primal_status dual_status +y = output_table[!, :objective] +input_features = innerjoin(input_table, output_table[!, [:id]], on = :id)[!, Not(:id)] # just use success solves +X = Matrix(input_features) + +# optimiser=Flux.Optimise.Adam() +optimiser=ConvexRule( + Flux.Optimise.Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) +) +nn = MultitargetNeuralNetworkRegressor(; + builder=FullyConnectedBuilder([1024, 1024, 300, 64, 32]), # [1024, 300, 64, 32] + rng=123, + epochs=100, + optimiser=optimiser, + acceleration=CUDALibs(), + batch_size=24, +) + +# Constrols + +clear() = begin + global losses = [] + global training_losses = [] + global epochs = [] + return nothing +end + +function update_loss(loss) + @info "Loss: $loss" + push!(losses, loss) +end +update_training_loss(report) = + push!(training_losses, + report.training_losses[end]) +update_epochs(epoch) = push!(epochs, epoch) + +controls=[Step(1), + NumberSinceBest(6), + InvalidValue(), + TimeLimit(5/60), + WithLossDo(update_loss), + WithReportDo(update_training_loss), + WithIterationsDo(update_epochs) +] + +# WIP +# function SobolevLoss_mse(x, y) +# o_term = Flux.mse(x, y[:, 1]) +# d_term = Flux.mse(gradient( ( _x ) -> sum(layer( _x )), x), y[:, 2:end]) +# return o_term + d_term * 0.1 +# end + +iterated_pipe = + IteratedModel(model=nn, + controls=controls, + resampling=Holdout(fraction_train=0.8), + measure = l2, +) + +# Fit model +clear() +mach = machine(iterated_pipe, X, y) +fit!(mach; verbosity=2) \ No newline at end of file From d6114086e120d314d70e4c3e36759c7ee2fae152 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 17:47:43 -0500 Subject: [PATCH 47/74] fix --- examples/unitcommitment/uc_nn_training.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 72500f4..f9a3130 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -57,9 +57,10 @@ input_table = vcat(input_tables...) output_table = vcat(output_tables...) 
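
The fix below replaces the positional feature/target pairing with an explicit key join, so only ids that produced a recorded objective survive. On a toy pair of tables (column names are illustrative) the same pattern gives:

using DataFrames

in_tbl  = DataFrame(id=[1, 2, 3], load_bus1_1=[1.0, 1.1, 0.9])
out_tbl = DataFrame(id=[1, 3], objective=[10.0, 9.5])
train   = innerjoin(in_tbl, out_tbl[!, [:id, :objective]]; on=:id)  # keeps ids 1 and 3
Xd = Matrix(train[!, Not([:id, :objective])])
yd = Matrix(train[!, [:objective]])
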
# Separate input and output variables & ignore id time status primal_status dual_status -y = output_table[!, :objective] -input_features = innerjoin(input_table, output_table[!, [:id]], on = :id)[!, Not(:id)] # just use success solves +train_table = innerjoin(input_table, output_table[!, [:id, :objective]]; on=:id) +input_features = train_table[!, Not([:id, :objective])] X = Matrix(input_features) +y = Matrix(train_table[!, [:objective]]) # optimiser=Flux.Optimise.Adam() optimiser=ConvexRule( From c17c7b78e78e3fb59428c2c9fe337c41ad3e7928 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 18:19:49 -0500 Subject: [PATCH 48/74] update code train --- examples/unitcommitment/uc_nn_training.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index f9a3130..48b0913 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -13,8 +13,11 @@ using MLJFlux using CUDA # if error run CUDA.set_runtime_version!(v"12.1.0") using Flux using MLJ + using DataFrames +using Wandb, Dates, Logging + include(joinpath(dirname(@__FILE__), "bnb_dataset.jl")) include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl")) From 1869691af105b893c0baaf19b8f2f59b1a7911a1 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 18:42:38 -0500 Subject: [PATCH 49/74] update --- .gitignore | 1 + examples/unitcommitment/uc_nn_training.jl | 43 ++++++++++++++++++----- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 9d1e2ad..ca77f2c 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ Manifest.toml *.sbatch examples/unitcommitment/app/* *.edu +examples/unitcommitment/wandb/* diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 48b0913..13932ae 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -65,17 +65,32 @@ input_features = train_table[!, Not([:id, :objective])] X = Matrix(input_features) y = Matrix(train_table[!, [:objective]]) -# optimiser=Flux.Optimise.Adam() +# Define model and logger +lg = WandbLogger( + project = "unit_commitment_proxies", + name = "$(case_name)-$(date)-h$(horizon)-$(now())", + config = Dict( + "layers" => [300, 64, 32], # [1024, 300, 64, 32] , [1024, 1024, 300, 64, 32] + "batch_size" => 24, + "optimiser" => "ConvexRule", + "learning_rate" => 0.01, + "rng" => 123, + ) +) + +global_logger(lg) + optimiser=ConvexRule( - Flux.Optimise.Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) + Flux.Optimise.Adam(get_config(lg, "learning_rate"), (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) ) + nn = MultitargetNeuralNetworkRegressor(; - builder=FullyConnectedBuilder([1024, 1024, 300, 64, 32]), # [1024, 300, 64, 32] - rng=123, + builder=FullyConnectedBuilder(get_config(lg, "layers")), + rng=get_config(lg, "rng"), epochs=100, optimiser=optimiser, acceleration=CUDALibs(), - batch_size=24, + batch_size=get_config(lg, "batch_size"), ) # Constrols @@ -88,12 +103,19 @@ clear() = begin end function update_loss(loss) - @info "Loss: $loss" + @info "metrics" loss=loss push!(losses, loss) + return nothing end -update_training_loss(report) = + +function update_training_loss(report) + @info "training_losses" training_loss=report.training_losses[end] push!(training_losses, - report.training_losses[end]) + report.training_losses[end] + ) + return nothing +end + 
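
Stripped of the MLJ control plumbing, the hooks above just forward scalars to Weights & Biases. A minimal standalone version of the pattern, assuming a configured wandb account (project and metric names are illustrative):

using Wandb, Logging

demo_lg = WandbLogger(project="demo_project", name="demo_run",
                      config=Dict("learning_rate" => 0.01))
global_logger(demo_lg)                 # route @info records through wandb
for epoch in 1:5
    @info "metrics" loss = 1.0 / epoch
    Wandb.log(demo_lg, Dict("log/epoch" => epoch))   # or log explicitly
end
close(demo_lg)
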
update_epochs(epoch) = push!(epochs, epoch) controls=[Step(1), @@ -122,4 +144,7 @@ iterated_pipe = # Fit model clear() mach = machine(iterated_pipe, X, y) -fit!(mach; verbosity=2) \ No newline at end of file +fit!(mach; verbosity=2) + +# Finish the run +close(lg) \ No newline at end of file From 5dafe580843d1baeeb2463b7d899a0b49b73856e Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Fri, 26 Jan 2024 18:49:33 -0500 Subject: [PATCH 50/74] update --- examples/unitcommitment/uc_nn_training.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 13932ae..01fc522 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -28,9 +28,9 @@ data_dir = joinpath(dirname(@__FILE__), "data") # Parameters ############## filetype=ArrowFile -case_name = ARGS[3] # case_name = "case300" -date = ARGS[4] # date="2017-01-01" -horizon = parse(Int, ARGS[5]) # horizon=2 +case_name = ARGS[1] # case_name = "case300" +date = ARGS[2] # date="2017-01-01" +horizon = parse(Int, ARGS[3]) # horizon=2 save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) data_dir = joinpath(data_dir, case_name, date, "h" * string(horizon)) From 1d4a07b2384637b6c45c633dcfb0247c26e2db42 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Sat, 27 Jan 2024 16:17:39 -0500 Subject: [PATCH 51/74] update --- examples/unitcommitment/uc_nn_training.jl | 38 ++++++++++++----------- examples/unitcommitment/unitcommitment.jl | 2 +- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 01fc522..32a87ef 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -87,7 +87,7 @@ optimiser=ConvexRule( nn = MultitargetNeuralNetworkRegressor(; builder=FullyConnectedBuilder(get_config(lg, "layers")), rng=get_config(lg, "rng"), - epochs=100, + epochs=5000, optimiser=optimiser, acceleration=CUDALibs(), batch_size=get_config(lg, "batch_size"), @@ -95,33 +95,35 @@ nn = MultitargetNeuralNetworkRegressor(; # Constrols -clear() = begin - global losses = [] - global training_losses = [] - global epochs = [] - return nothing -end +# clear() = begin +# global losses = [] +# global training_losses = [] +# global epochs = [] +# return nothing +# end function update_loss(loss) @info "metrics" loss=loss - push!(losses, loss) + # push!(losses, loss) return nothing end function update_training_loss(report) - @info "training_losses" training_loss=report.training_losses[end] - push!(training_losses, - report.training_losses[end] - ) + @info "metrics" training_loss=report.training_losses[end] + # push!(training_losses, + # report.training_losses[end] + # ) return nothing end -update_epochs(epoch) = push!(epochs, epoch) +update_epochs(epoch) = @info "log" epoch=epoch # push!(epochs, epoch) controls=[Step(1), - NumberSinceBest(6), + # NumberSinceBest(20), + PQ(), + GL(; alpha=2.0), InvalidValue(), - TimeLimit(5/60), + TimeLimit(; t=5/60), WithLossDo(update_loss), WithReportDo(update_training_loss), WithIterationsDo(update_epochs) @@ -137,12 +139,12 @@ controls=[Step(1), iterated_pipe = IteratedModel(model=nn, controls=controls, - resampling=Holdout(fraction_train=0.8), - measure = l2, + resampling=Holdout(fraction_train=0.7), + measure = Flux.mae, ) # Fit model -clear() +# clear() mach = machine(iterated_pipe, X, y) 
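
For orientation, the iterated-training setup above can be reproduced end to end on a synthetic problem. A reduced sketch, with MLJFlux's built-in Short builder standing in for this repository's FullyConnectedBuilder and a toy dataset in place of the unit commitment tables:

using MLJ, MLJFlux

Xt, yt = make_regression(200, 5)                 # synthetic regression data
nn_small = MLJFlux.NeuralNetworkRegressor(
    builder=MLJFlux.Short(n_hidden=16),
    epochs=1,                                    # IteratedModel drives the epochs
)
iterated_small = IteratedModel(
    model=nn_small,
    controls=[Step(1), NumberSinceBest(6), InvalidValue(), TimeLimit(; t=1/60)],
    resampling=Holdout(fraction_train=0.7),
    measure=mae,
)
mach_small = machine(iterated_small, Xt, yt)
fit!(mach_small)
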
fit!(mach; verbosity=2) diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl index 0807db1..f356e05 100644 --- a/examples/unitcommitment/unitcommitment.jl +++ b/examples/unitcommitment/unitcommitment.jl @@ -93,7 +93,7 @@ iterated_pipe = IteratedModel(model=nn, controls=controls, resampling=Holdout(fraction_train=0.8), - measure = l2, + measure = Flux.mae, ) # Fit model From fb22524faa1d6d94abba618ed3cd6e020ee454a7 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Sat, 27 Jan 2024 17:42:31 -0500 Subject: [PATCH 52/74] faster train float32 --- examples/unitcommitment/uc_nn_training.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 32a87ef..12f730b 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -62,8 +62,8 @@ output_table = vcat(output_tables...) # Separate input and output variables & ignore id time status primal_status dual_status train_table = innerjoin(input_table, output_table[!, [:id, :objective]]; on=:id) input_features = train_table[!, Not([:id, :objective])] -X = Matrix(input_features) -y = Matrix(train_table[!, [:objective]]) +X = Float32.(Matrix(input_features)) +y = Float32.(Matrix(train_table[!, [:objective]])) # Define model and logger lg = WandbLogger( From fab25a64ef7e297deab66528e648b3e4f7e8266a Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Sat, 27 Jan 2024 21:18:50 -0500 Subject: [PATCH 53/74] update --- examples/unitcommitment/uc_nn_training.jl | 25 +++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 12f730b..f32b53b 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -70,15 +70,16 @@ lg = WandbLogger( project = "unit_commitment_proxies", name = "$(case_name)-$(date)-h$(horizon)-$(now())", config = Dict( - "layers" => [300, 64, 32], # [1024, 300, 64, 32] , [1024, 1024, 300, 64, 32] + "layers" => [1024, 1024, 300, 64, 32], # [1024, 300, 64, 32] , [1024, 1024, 300, 64, 32] "batch_size" => 24, "optimiser" => "ConvexRule", "learning_rate" => 0.01, "rng" => 123, + "lambda" => 0.3, ) ) -global_logger(lg) +# global_logger(lg) optimiser=ConvexRule( Flux.Optimise.Adam(get_config(lg, "learning_rate"), (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) @@ -91,6 +92,7 @@ nn = MultitargetNeuralNetworkRegressor(; optimiser=optimiser, acceleration=CUDALibs(), batch_size=get_config(lg, "batch_size"), + lambda=get_config(lg, "lambda"), ) # Constrols @@ -103,27 +105,34 @@ nn = MultitargetNeuralNetworkRegressor(; # end function update_loss(loss) - @info "metrics" loss=loss + # @info "metrics" loss=loss + Wandb.log(lg, Dict("metrics/loss" => loss)) # push!(losses, loss) return nothing end function update_training_loss(report) - @info "metrics" training_loss=report.training_losses[end] + # @info "metrics" training_loss=report.training_losses[end] + Wandb.log(lg, Dict("metrics/training_loss" => report.training_losses[end])) # push!(training_losses, # report.training_losses[end] # ) return nothing end -update_epochs(epoch) = @info "log" epoch=epoch # push!(epochs, epoch) +function update_epochs(epoch) + # @info "log" epoch=epoch + Wandb.log(lg, Dict("log/epoch" => epoch)) + # push!(epochs, epoch) + return nothing +end controls=[Step(1), # NumberSinceBest(20), - PQ(), - GL(; 
alpha=2.0), + # PQ(; alpha=0.9, k=30), + GL(; alpha=20.0), InvalidValue(), - TimeLimit(; t=5/60), + TimeLimit(; t=1), WithLossDo(update_loss), WithReportDo(update_training_loss), WithIterationsDo(update_epochs) From 410af0aaa66e08b36732e5d3db3230fb5eabfb8d Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Sun, 28 Jan 2024 01:05:10 -0500 Subject: [PATCH 54/74] update --- examples/unitcommitment/uc_nn_training.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index f32b53b..5679573 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -70,7 +70,7 @@ lg = WandbLogger( project = "unit_commitment_proxies", name = "$(case_name)-$(date)-h$(horizon)-$(now())", config = Dict( - "layers" => [1024, 1024, 300, 64, 32], # [1024, 300, 64, 32] , [1024, 1024, 300, 64, 32] + "layers" => [1024, 512, 64], # [1024, 300, 64, 32] , [1024, 1024, 300, 64, 32] "batch_size" => 24, "optimiser" => "ConvexRule", "learning_rate" => 0.01, @@ -130,7 +130,7 @@ end controls=[Step(1), # NumberSinceBest(20), # PQ(; alpha=0.9, k=30), - GL(; alpha=20.0), + GL(; alpha=200.0), InvalidValue(), TimeLimit(; t=1), WithLossDo(update_loss), From 4c6599f4ab0d4ad0f0b05e5ee5e756498f3603c3 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Sun, 28 Jan 2024 01:08:18 -0500 Subject: [PATCH 55/74] update generation --- examples/unitcommitment/slurm/slurm.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/unitcommitment/slurm/slurm.jl b/examples/unitcommitment/slurm/slurm.jl index 1d89cdc..c11ade0 100644 --- a/examples/unitcommitment/slurm/slurm.jl +++ b/examples/unitcommitment/slurm/slurm.jl @@ -15,7 +15,7 @@ end using Distributed, ClusterManagers -np = 4 # +np = 20 # addprocs(SlurmManager(np), job_file_loc = ARGS[1]) #cpus_per_task=24, mem_per_cpu=24, partition="debug", t="08:00:00") println("We are all connected and ready.") From f3fee5179115530d59f020d50ac902dfb8d6460a Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Sun, 28 Jan 2024 12:56:01 -0500 Subject: [PATCH 56/74] update --- examples/unitcommitment/uc_nn_training.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 5679573..28a5c67 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -132,7 +132,7 @@ controls=[Step(1), # PQ(; alpha=0.9, k=30), GL(; alpha=200.0), InvalidValue(), - TimeLimit(; t=1), + TimeLimit(; t=24), WithLossDo(update_loss), WithReportDo(update_training_loss), WithIterationsDo(update_epochs) From 7625e6435851b3a2f0a4e518defd30c29b302223 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Sun, 28 Jan 2024 13:26:12 -0500 Subject: [PATCH 57/74] update gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index ca77f2c..9315d46 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ Manifest.toml examples/unitcommitment/app/* *.edu examples/unitcommitment/wandb/* +*.png From b90795b0f26d82505265976871f25fdcfa63d062 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Sun, 28 Jan 2024 13:40:00 -0500 Subject: [PATCH 58/74] update objective --- examples/unitcommitment/uc_nn_training.jl | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/examples/unitcommitment/uc_nn_training.jl 
b/examples/unitcommitment/uc_nn_training.jl index 28a5c67..d8ad015 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -17,6 +17,7 @@ using MLJ using DataFrames using Wandb, Dates, Logging +using Statistics include(joinpath(dirname(@__FILE__), "bnb_dataset.jl")) @@ -85,6 +86,14 @@ optimiser=ConvexRule( Flux.Optimise.Adam(get_config(lg, "learning_rate"), (0.9, 0.999), 1.0e-8, IdDict{Any,Any}()) ) +function relative_rmse(ŷ, y) + return sqrt(mean(((ŷ .- y) ./ y) .^ 2)) +end + +function relative_mae(ŷ, y) + return mean(abs.((ŷ .- y) ./ y)) +end + nn = MultitargetNeuralNetworkRegressor(; builder=FullyConnectedBuilder(get_config(lg, "layers")), rng=get_config(lg, "rng"), @@ -93,6 +102,7 @@ nn = MultitargetNeuralNetworkRegressor(; acceleration=CUDALibs(), batch_size=get_config(lg, "batch_size"), lambda=get_config(lg, "lambda"), + loss=relative_rmse, ) # Constrols @@ -149,7 +159,7 @@ iterated_pipe = IteratedModel(model=nn, controls=controls, resampling=Holdout(fraction_train=0.7), - measure = Flux.mae, + measure = relative_mae, ) # Fit model From be1730116cd3ffd4fac1074d3f4572be844db685 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Sun, 28 Jan 2024 17:43:10 -0500 Subject: [PATCH 59/74] fix update --- README.md | 2 +- test/datasetgen.jl | 2 +- test/worst_case.jl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 562866c..4603eb7 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ The user needs to first define a problem iterator: # The problem to iterate over model = Model(() -> POI.Optimizer(HiGHS.Optimizer())) @variable(model, x) -p = @variable(model, p in POI.Parameter(1.0)) # The parameter (defined using POI) +p = @variable(model, p in MOI.Parameter(1.0)) # The parameter (defined using POI) @constraint(model, cons, x + p >= 3) @objective(model, Min, 2x) diff --git a/test/datasetgen.jl b/test/datasetgen.jl index f734179..72edfbe 100644 --- a/test/datasetgen.jl +++ b/test/datasetgen.jl @@ -9,7 +9,7 @@ function test_problem_iterator(path::AbstractString) # The problem to iterate over model = JuMP.Model(() -> POI.Optimizer(HiGHS.Optimizer())) @variable(model, x) - p = @variable(model, _p in POI.Parameter(1.0)) + p = @variable(model, _p in MOI.Parameter(1.0)) @constraint(model, cons, x + _p >= 3) @objective(model, Min, 2x) diff --git a/test/worst_case.jl b/test/worst_case.jl index 902bb70..a068fd9 100644 --- a/test/worst_case.jl +++ b/test/worst_case.jl @@ -89,7 +89,7 @@ function test_worst_case_problem_iterator(path::AbstractString, num_p=10) [CSVFile, ArrowFile] function _primal_builder!(; recorder=nothing) model = JuMP.Model(() -> POI.Optimizer(HiGHS.Optimizer())) - parameters = @variable(model, _p in POI.Parameter(1.0)) + parameters = @variable(model, _p in MOI.Parameter(1.0)) @variable(model, x) @constraint(model, cons, x + parameters >= 3) @objective(model, Min, 2x) From 6177a77fda0ea4d0f51d0aad5a9892138e46d5af Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Sun, 28 Jan 2024 17:44:19 -0500 Subject: [PATCH 60/74] update lambda --- examples/unitcommitment/uc_nn_training.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index d8ad015..22f0302 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -76,7 +76,7 @@ lg = WandbLogger( "optimiser" => "ConvexRule", "learning_rate" => 0.01, "rng" => 123, - "lambda" => 0.3, 
+ "lambda" => 0.01, ) ) From b22f67045bc65a634f589fdedc908f22086df6ef Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Sun, 28 Jan 2024 18:23:38 -0500 Subject: [PATCH 61/74] update fix --- examples/powermodels/pglib_datagen.jl | 4 ++-- test/worst_case.jl | 12 ++++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/examples/powermodels/pglib_datagen.jl b/examples/powermodels/pglib_datagen.jl index 72053c3..642d1ca 100644 --- a/examples/powermodels/pglib_datagen.jl +++ b/examples/powermodels/pglib_datagen.jl @@ -185,7 +185,7 @@ function generate_dataset_pglib( [network_data["load"]["$l"]["pd"] for l in 1:num_loads], [network_data["load"]["$l"]["qd"] for l in 1:num_loads], ) - p = load_parameter_factory(model, 1:num_inputs; load_set=POI.Parameter.(original_load)) + p = load_parameter_factory(model, 1:num_inputs; load_set=MOI.Parameter.(original_load)) # Build model and Recorder file = joinpath( @@ -250,7 +250,7 @@ function generate_worst_case_dataset_Nonconvex( [l["qd"] for l in values(network_data["load"])], ) p = load_parameter_factory( - model, 1:(num_loads * 2); load_set=POI.Parameter.(original_load) + model, 1:(num_loads * 2); load_set=MOI.Parameter.(original_load) ) # Define batch id diff --git a/test/worst_case.jl b/test/worst_case.jl index a068fd9..c94c9f1 100644 --- a/test/worst_case.jl +++ b/test/worst_case.jl @@ -34,6 +34,11 @@ function test_worst_case_problem_iterator(path::AbstractString, num_p=10) optimizer_factory, ) + # Test Build Primal + model = JuMP.Model() + parameters = problem_iterator.parameters(model) + problem_iterator.primal_builder!(model, parameters) + # file_names batch_id = string(uuid1()) file_input = joinpath(path, "test_$(batch_id)_input") @@ -41,7 +46,7 @@ function test_worst_case_problem_iterator(path::AbstractString, num_p=10) # The recorder recorder = Recorder{filetype}( - file_output; filename_input=file_input, primal_variables=[], dual_variables=[] + file_output; filename_input=file_input, primal_variables=[model[:x]], dual_variables=[] ) # Solve all problems and record solutions @@ -118,6 +123,9 @@ function test_worst_case_problem_iterator(path::AbstractString, num_p=10) options=NLoptOptions(; maxeval=10), ) + # Test Build Primal + model, parameters = problem_iterator.primal_builder!() + # file_names batch_id = string(uuid1()) file_input = joinpath(path, "test_$(batch_id)_input") @@ -125,7 +133,7 @@ function test_worst_case_problem_iterator(path::AbstractString, num_p=10) # The recorder recorder = Recorder{filetype}( - file_output; filename_input=file_input, primal_variables=[], dual_variables=[] + file_output; filename_input=file_input, primal_variables=[model[:x]], dual_variables=[] ) # Solve all problems and record solutions From bff1b15aa7d59afcfdc6488093f8c3bb6a7f78f0 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Mon, 29 Jan 2024 13:19:41 -0500 Subject: [PATCH 62/74] update --- examples/unitcommitment/uc_nn_training.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 22f0302..58b2230 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -72,11 +72,11 @@ lg = WandbLogger( name = "$(case_name)-$(date)-h$(horizon)-$(now())", config = Dict( "layers" => [1024, 512, 64], # [1024, 300, 64, 32] , [1024, 1024, 300, 64, 32] - "batch_size" => 24, + "batch_size" => 32, "optimiser" => "ConvexRule", "learning_rate" => 0.01, "rng" => 123, - "lambda" => 
0.01, + # "lambda" => 0.00, ) ) @@ -101,7 +101,7 @@ nn = MultitargetNeuralNetworkRegressor(; optimiser=optimiser, acceleration=CUDALibs(), batch_size=get_config(lg, "batch_size"), - lambda=get_config(lg, "lambda"), + # lambda=get_config(lg, "lambda"), loss=relative_rmse, ) From 95ee6c6d3ca299e835379a3a45088f8557a215c4 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Mon, 29 Jan 2024 14:15:04 -0500 Subject: [PATCH 63/74] update --- examples/unitcommitment/uc_dataset_generation.jl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/examples/unitcommitment/uc_dataset_generation.jl b/examples/unitcommitment/uc_dataset_generation.jl index b18ab37..fca6a39 100644 --- a/examples/unitcommitment/uc_dataset_generation.jl +++ b/examples/unitcommitment/uc_dataset_generation.jl @@ -30,6 +30,7 @@ horizon = parse(Int, ARGS[5]) # horizon=2 save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon) num_batches = parse(Int, ARGS[6]) # num_batches=10 solve_nominal = parse(Bool, ARGS[7]) # solve_nominal=true +num_random = parse(Int, ARGS[8]) # num_random=100 data_dir = joinpath(data_dir, case_name, date, "h" * string(horizon)) mkpath(joinpath(data_dir, "input")) mkpath(joinpath(data_dir, "output")) @@ -51,7 +52,9 @@ instance.time = horizon if solve_nominal model = build_model_uc(instance) uc_bnb_dataset(instance, save_file; data_dir=data_dir, model=model) - uc_random_dataset!(instance, save_file; data_dir=data_dir, model=model) + if num_random > 0 + uc_random_dataset!(instance, save_file; data_dir=data_dir, model=model, num_s=num_random) + end end # save nominal loads in a dictionary @@ -74,7 +77,9 @@ end @info "Solving batch $i" rng perturbed_loads_sum model = build_model_uc(instance_) uc_bnb_dataset(instance_, save_file; data_dir=data_dir, model=model) - uc_random_dataset!(instance_, save_file; data_dir=data_dir, model=model) + if num_random > 0 + uc_random_dataset!(instance_, save_file; data_dir=data_dir, model=model, num_s=num_random) + end end include(joinpath(dirname(@__FILE__), "compress_arrow.jl")) \ No newline at end of file From af9bac16a8a7648352c149148983db021d730367 Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Mon, 29 Jan 2024 14:44:56 -0500 Subject: [PATCH 64/74] fix tests --- examples/powermodels/pglib_datagen.jl | 7 ++++--- src/L2O.jl | 1 + src/datasetgen.jl | 10 ++++++++++ test/worst_case.jl | 3 +++ 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/examples/powermodels/pglib_datagen.jl b/examples/powermodels/pglib_datagen.jl index 642d1ca..dc43e6c 100644 --- a/examples/powermodels/pglib_datagen.jl +++ b/examples/powermodels/pglib_datagen.jl @@ -127,6 +127,7 @@ function pm_primal_builder!( set_dual_variable!(recorder, real_balance) end end + set_model!(recorder) return model, parameters, variable_refs end @@ -191,7 +192,7 @@ function generate_dataset_pglib( file = joinpath( data_sim_dir, case_name * "_" * string(network_formulation) * "_output_" * batch_id ) - recorder = Recorder{filetype}(file; filterfn=filterfn) + recorder = Recorder{filetype}(file; filterfn=filterfn,model=model) pm_primal_builder!( model, p, network_data, network_formulation; recorder=recorder, record_duals=true ) @@ -265,7 +266,7 @@ function generate_worst_case_dataset_Nonconvex( data_sim_dir, case_name * "_" * string(network_formulation) * "_output_" * batch_id ) recorder = Recorder{filetype}( - file_output; filename_input=file_input, primal_variables=[], dual_variables=[] + file_output; filename_input=file_input, primal_variables=[], 
dual_variables=[], model=model ) # Build model @@ -372,7 +373,7 @@ function generate_worst_case_dataset( data_sim_dir, case_name * "_" * string(network_formulation) * "_output_" * batch_id ) recorder = Recorder{filetype}( - file_output; filename_input=file_input, primal_variables=[], dual_variables=[] + file_output; filename_input=file_input, primal_variables=[], dual_variables=[], model=JuMP.Model() # dummy model ) # Solve all problems and record solutions diff --git a/src/L2O.jl b/src/L2O.jl index 8d49950..1542ccf 100644 --- a/src/L2O.jl +++ b/src/L2O.jl @@ -28,6 +28,7 @@ export ArrowFile, WorstCaseProblemIterator, set_primal_variable!, set_dual_variable!, + set_model!, FullyConnected, FullyConnectedBuilder, make_convex!, diff --git a/src/datasetgen.jl b/src/datasetgen.jl index 9859b54..1561ef0 100644 --- a/src/datasetgen.jl +++ b/src/datasetgen.jl @@ -88,6 +88,16 @@ function set_dual_variable!(recorder::Recorder, p::Vector) return recorder.dual_variables = p end +function set_model!(recorder::Recorder) + recorder.model= if length(recorder.primal_variables) > 0 + owner_model(recorder.primal_variables[1]) + elseif length(recorder.dual_variables) > 0 + owner_model(recorder.dual_variables[1]) + else + @error("No model provided") + end +end + abstract type AbstractProblemIterator end """ diff --git a/test/worst_case.jl b/test/worst_case.jl index c94c9f1..52f5a94 100644 --- a/test/worst_case.jl +++ b/test/worst_case.jl @@ -19,6 +19,7 @@ function test_worst_case_problem_iterator(path::AbstractString, num_p=10) if !isnothing(recorder) set_primal_variable!(recorder, [x]) set_dual_variable!(recorder, [cons]) + set_model!(recorder) end end function set_iterator!(model, parameters, idx) @@ -36,6 +37,7 @@ function test_worst_case_problem_iterator(path::AbstractString, num_p=10) # Test Build Primal model = JuMP.Model() + set_optimizer(model, problem_iterator.optimizer()) parameters = problem_iterator.parameters(model) problem_iterator.primal_builder!(model, parameters) @@ -102,6 +104,7 @@ function test_worst_case_problem_iterator(path::AbstractString, num_p=10) if !isnothing(recorder) set_primal_variable!(recorder, [x]) set_dual_variable!(recorder, [cons]) + set_model!(recorder) end return model, [parameters] From f5c06fe1487a844b953c477a2b0510705ca51dfe Mon Sep 17 00:00:00 2001 From: Andrew Rosemberg Date: Mon, 29 Jan 2024 16:10:09 -0500 Subject: [PATCH 65/74] update penalty --- src/cutting_planes.jl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/cutting_planes.jl b/src/cutting_planes.jl index 95a998d..0510d3b 100644 --- a/src/cutting_planes.jl +++ b/src/cutting_planes.jl @@ -115,7 +115,13 @@ function delete_binary_terms!(m::JuMP.Model; delete_objective=true) return end -function add_deficit_constraints!(model::JuMP.Model; penalty=1e7) +function add_deficit_constraints!(model::JuMP.Model; penalty=nothing) + if isnothing(penalty) + obj = objective_function(model) + # get the highest coefficient + penalty = maximum(abs.(values(obj.terms))) + penalty = penalty * 1.1 + end consrefs = [con for con in all_constraints(model, include_variable_in_set_constraints=false)] @variable(model, deficit[1:length(consrefs)]) @variable(model, norm_deficit) From 8b5588925006dbec220304317624c44ccf0cb68d Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Mon, 29 Jan 2024 16:21:43 -0500 Subject: [PATCH 66/74] update with model save --- examples/unitcommitment/uc_nn_training.jl | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git 
a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 58b2230..0886f77 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -137,12 +137,19 @@ function update_epochs(epoch) return nothing end -controls=[Step(1), - # NumberSinceBest(20), +model_dir = joinpath(dirname(@__FILE__), "models") +mkpath(model_dir) + +save_control = + MLJIteration.skip(Save(joinpath(model_dir, save_file * ".jls")), predicate=3) + +controls=[Step(2), + NumberSinceBest(6), # PQ(; alpha=0.9, k=30), - GL(; alpha=200.0), + GL(; alpha=4.0), InvalidValue(), - TimeLimit(; t=24), + TimeLimit(; t=1), + save_control, WithLossDo(update_loss), WithReportDo(update_training_loss), WithIterationsDo(update_epochs) @@ -168,4 +175,4 @@ mach = machine(iterated_pipe, X, y) fit!(mach; verbosity=2) # Finish the run -close(lg) \ No newline at end of file +close(lg) From 297c5e47d2db356016529cd0826f12d7bf0601c2 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Mon, 29 Jan 2024 17:02:54 -0500 Subject: [PATCH 67/74] update save train --- examples/unitcommitment/uc_nn_training.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 0886f77..70a1627 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -176,3 +176,6 @@ fit!(mach; verbosity=2) # Finish the run close(lg) + +# Save model +MLJ.save(joinpath(model_dir, save_file * ".jls"), mach) From 7879cd1907c3201ba65cfc122f333ddbe0187539 Mon Sep 17 00:00:00 2001 From: Andrew David Werner Rosemberg Date: Mon, 29 Jan 2024 17:48:54 -0500 Subject: [PATCH 68/74] update code --- .gitignore | 1 + examples/unitcommitment/uc_nn_training.jl | 2 + examples/unitcommitment/unitcommitment.jl | 100 ++-------------------- src/L2O.jl | 5 +- src/metrics.jl | 7 ++ 5 files changed, 22 insertions(+), 93 deletions(-) create mode 100644 src/metrics.jl diff --git a/.gitignore b/.gitignore index 9315d46..74c4709 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ examples/unitcommitment/app/* *.edu examples/unitcommitment/wandb/* *.png +*.jls diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl index 70a1627..68cc081 100644 --- a/examples/unitcommitment/uc_nn_training.jl +++ b/examples/unitcommitment/uc_nn_training.jl @@ -161,6 +161,8 @@ controls=[Step(2), # d_term = Flux.mse(gradient( ( _x ) -> sum(layer( _x )), x), y[:, 2:end]) # return o_term + d_term * 0.1 # end +# layer = mach.fitresult.fitresult[1] +# gradient( ( _x ) -> sum(layer( _x )), X') iterated_pipe = IteratedModel(model=nn, diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl index f356e05..4b500e7 100644 --- a/examples/unitcommitment/unitcommitment.jl +++ b/examples/unitcommitment/unitcommitment.jl @@ -6,110 +6,26 @@ # Load Functions ############## -import Pkg; Pkg.activate(dirname(dirname(@__DIR__))); Pkg.instantiate() - using MLJFlux -using CUDA using Flux using MLJ - -include(joinpath(dirname(@__FILE__), "bnb_dataset.jl")) - -include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl")) - -data_dir = joinpath(dirname(@__FILE__), "data") +using L2O ############## # Parameters ############## -filetype=ArrowFile -case_name = ARGS[3] # case_name = "case300" -date = ARGS[4] # date="2017-01-01" -horizon = parse(Int, ARGS[5]) # horizon=2 + +case_name = "case300" +date = "2017-01-01" +horizon = 2 
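
The restore below relies on MLJ's standard machine serialization. A self-contained toy version of the save/load cycle (DecisionTreeRegressor is only an example model; any fitted machine serializes the same way):

using MLJ

Xs, ys = make_regression(50, 3)
Tree = @load DecisionTreeRegressor pkg=DecisionTree verbosity=0
mach_demo = machine(Tree(), Xs, ys)
fit!(mach_demo)
MLJ.save("toy_model.jls", mach_demo)     # strips training data, keeps the fitresult
mach_restored = machine("toy_model.jls")
yhat = predict(mach_restored, Xs)
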
---
 examples/unitcommitment/uc_nn_training.jl | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl
index 58b2230..0886f77 100644
--- a/examples/unitcommitment/uc_nn_training.jl
+++ b/examples/unitcommitment/uc_nn_training.jl
@@ -137,12 +137,19 @@ function update_epochs(epoch)
     return nothing
 end
 
-controls=[Step(1),
-    # NumberSinceBest(20),
+model_dir = joinpath(dirname(@__FILE__), "models")
+mkpath(model_dir)
+
+save_control =
+    MLJIteration.skip(Save(joinpath(model_dir, save_file * ".jls")), predicate=3)
+
+controls=[Step(2),
+    NumberSinceBest(6),
     # PQ(; alpha=0.9, k=30),
-    GL(; alpha=200.0),
+    GL(; alpha=4.0),
     InvalidValue(),
-    TimeLimit(; t=24),
+    TimeLimit(; t=1),
+    save_control,
     WithLossDo(update_loss),
     WithReportDo(update_training_loss),
     WithIterationsDo(update_epochs)
@@ -168,4 +175,4 @@ mach = machine(iterated_pipe, X, y)
 fit!(mach; verbosity=2)
 
 # Finish the run
-close(lg)
\ No newline at end of file
+close(lg)

From 297c5e47d2db356016529cd0826f12d7bf0601c2 Mon Sep 17 00:00:00 2001
From: Andrew David Werner Rosemberg
Date: Mon, 29 Jan 2024 17:02:54 -0500
Subject: [PATCH 67/74] update save train

---
 examples/unitcommitment/uc_nn_training.jl | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl
index 0886f77..70a1627 100644
--- a/examples/unitcommitment/uc_nn_training.jl
+++ b/examples/unitcommitment/uc_nn_training.jl
@@ -176,3 +176,6 @@ fit!(mach; verbosity=2)
 
 # Finish the run
 close(lg)
+
+# Save model
+MLJ.save(joinpath(model_dir, save_file * ".jls"), mach)

From 7879cd1907c3201ba65cfc122f333ddbe0187539 Mon Sep 17 00:00:00 2001
From: Andrew David Werner Rosemberg
Date: Mon, 29 Jan 2024 17:48:54 -0500
Subject: [PATCH 68/74] update code

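The relative error metrics move into the package proper (src/metrics.jl,
exported from L2O), and the large inline training block in
unitcommitment.jl is replaced by loading a previously fitted machine. A
quick hand-checked sanity example of the two metrics, using the exact
definitions added below:

    using Statistics

    relative_rmse(ŷ, y) = sqrt(mean(((ŷ .- y) ./ y) .^ 2))
    relative_mae(ŷ, y) = mean(abs.((ŷ .- y) ./ y))

    ŷ = [102.0, 98.0]
    y = [100.0, 100.0]
    relative_rmse(ŷ, y)  # 0.02: both predictions are off by 2%
    relative_mae(ŷ, y)   # 0.02
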
---
 .gitignore                                |   1 +
 examples/unitcommitment/uc_nn_training.jl |   2 +
 examples/unitcommitment/unitcommitment.jl | 100 ++--------------------
 src/L2O.jl                                |   5 +-
 src/metrics.jl                            |   7 ++
 5 files changed, 22 insertions(+), 93 deletions(-)
 create mode 100644 src/metrics.jl

diff --git a/.gitignore b/.gitignore
index 9315d46..74c4709 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,4 @@ examples/unitcommitment/app/*
 *.edu
 examples/unitcommitment/wandb/*
 *.png
+*.jls
diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl
index 70a1627..68cc081 100644
--- a/examples/unitcommitment/uc_nn_training.jl
+++ b/examples/unitcommitment/uc_nn_training.jl
@@ -161,6 +161,8 @@ controls=[Step(2),
 #     d_term = Flux.mse(gradient( ( _x ) -> sum(layer( _x )), x), y[:, 2:end])
 #     return o_term + d_term * 0.1
 # end
+# layer = mach.fitresult.fitresult[1]
+# gradient( ( _x ) -> sum(layer( _x )), X')
 
 iterated_pipe =
     IteratedModel(model=nn,
diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl
index f356e05..4b500e7 100644
--- a/examples/unitcommitment/unitcommitment.jl
+++ b/examples/unitcommitment/unitcommitment.jl
@@ -6,110 +6,26 @@
 # Load Functions
 ##############
 
-import Pkg; Pkg.activate(dirname(dirname(@__DIR__))); Pkg.instantiate()
-
 using MLJFlux
-using CUDA
 using Flux
 using MLJ
-
-include(joinpath(dirname(@__FILE__), "bnb_dataset.jl"))
-
-include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl"))
-
-data_dir = joinpath(dirname(@__FILE__), "data")
+using L2O
 
 ##############
 # Parameters
 ##############
-
-filetype=ArrowFile
-case_name = ARGS[3] # case_name = "case300"
-date = ARGS[4] # date="2017-01-01"
-horizon = parse(Int, ARGS[5]) # horizon=2
+
+case_name = "case300"
+date = "2017-01-01"
+horizon = 2
 save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon)
-data_dir = joinpath(data_dir, case_name, date, "h" * string(horizon))
+model_dir = joinpath(pwd(), "examples", "unitcommitment", "models")
 
 ##############
-# Fit DNN approximator
+# Load Model
 ##############
 
-# Read input and output data
-
-# Separate input and output variables
-output_variables = output_data[!, :objective]
-input_features = innerjoin(input_data, output_data[!, [:id]], on = :id)[!, Symbol.(bin_vars_names)] # just use success solves
-
-# optimiser=Flux.Optimise.Adam()
-optimiser=ConvexRule(
-    Flux.Optimise.Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any,Any}())
-)
-
-nn = MultitargetNeuralNetworkRegressor(;
-    builder=FullyConnectedBuilder([1024, 1024, 300, 64, 32]), # [1024, 300, 64, 32]
-    rng=123,
-    epochs=100,
-    optimiser=optimiser,
-    acceleration=CUDALibs(),
-    batch_size=24,
-)
-
-X = vcat(X, Matrix(input_features))
-y = vcat(y, output_variables)
-
-# Constrols
-
-clear() = begin
-    global losses = []
-    global training_losses = []
-    global epochs = []
-    return nothing
-end
-
-function update_loss(loss)
-    @info "Loss: $loss"
-    push!(losses, loss)
-end
-update_training_loss(report) =
-    push!(training_losses,
-        report.training_losses[end])
-update_epochs(epoch) = push!(epochs, epoch)
-
-controls=[Step(1),
-    NumberSinceBest(6),
-    InvalidValue(),
-    TimeLimit(5/60),
-    WithLossDo(update_loss),
-    WithReportDo(update_training_loss),
-    WithIterationsDo(update_epochs)
-]
-
-# WIP
-function SobolevLoss_mse(x, y)
-    o_term = Flux.mse(x, y[:, 1])
-    d_term = Flux.mse(gradient( ( _x ) -> sum(layer( _x )), x), y[:, 2:end])
-    return o_term + d_term * 0.1
-end
-
-iterated_pipe =
-    IteratedModel(model=nn,
-        controls=controls,
-        resampling=Holdout(fraction_train=0.8),
-        measure = Flux.mae,
-)
-
-# Fit model
-clear()
-mach = machine(iterated_pipe, X, y)
-fit!(mach; verbosity=2)
-
-# Make predictions
-predictions = convert.(Float64, predict(mach, X))
-error = abs.(y .- predictions) ./ y
-mean(error)
-
-# Derivatives
-layer = mach.fitresult.fitresult[1]
-gradient( ( _x ) -> sum(layer( _x )), X')
-
+mach = machine(joinpath(model_dir, save_file * ".jls"))
 
 ##############
 # Solve using DNN approximator
diff --git a/src/L2O.jl b/src/L2O.jl
index 1542ccf..4f5e58c 100644
--- a/src/L2O.jl
+++ b/src/L2O.jl
@@ -33,7 +33,9 @@ export ArrowFile,
     FullyConnectedBuilder,
     make_convex!,
     make_convex,
-    ConvexRule
+    ConvexRule,
+    relative_rmse,
+    relative_mae
 
 include("datasetgen.jl")
 include("csvrecorder.jl")
@@ -42,5 +44,6 @@ include("worst_case.jl")
 include("worst_case_iter.jl")
 include("FullyConnected.jl")
 include("nn_expression.jl")
+include("metrics.jl")
 
 end
diff --git a/src/metrics.jl b/src/metrics.jl
new file mode 100644
index 0000000..cff131d
--- /dev/null
+++ b/src/metrics.jl
@@ -0,0 +1,7 @@
+function relative_rmse(ŷ, y)
+    return sqrt(mean(((ŷ .- y) ./ y) .^ 2))
+end
+
+function relative_mae(ŷ, y)
+    return mean(abs.((ŷ .- y) ./ y))
+end
\ No newline at end of file

From 6f00eac23a3da06afc685591c8351f7ef0cbf8d9 Mon Sep 17 00:00:00 2001
From: Andrew David Werner Rosemberg
Date: Mon, 29 Jan 2024 18:28:24 -0500
Subject: [PATCH 69/74] update train

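Dead code is pruned from the training script: the commented-out bookkeeping
inside the Wandb callbacks, the local metric definitions now provided by
L2O, and the unused global_logger call. The callbacks reduce to thin
forwarders, and the checkpoint extension changes from .jls to .jlso. The
surviving pattern, assuming a WandbLogger lg is in scope (control names
come from MLJ's iteration API):

    using MLJ, Wandb

    update_loss(loss) = Wandb.log(lg, Dict("metrics/loss" => loss))
    update_epochs(epoch) = Wandb.log(lg, Dict("log/epoch" => epoch))

    controls = [WithLossDo(update_loss), WithIterationsDo(update_epochs)]
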
---
 examples/unitcommitment/uc_nn_training.jl | 27 +----------------------
 1 file changed, 1 insertion(+), 26 deletions(-)

diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl
index 68cc081..01065e8 100644
--- a/examples/unitcommitment/uc_nn_training.jl
+++ b/examples/unitcommitment/uc_nn_training.jl
@@ -80,20 +80,10 @@ lg = WandbLogger(
     )
 )
 
-# global_logger(lg)
-
 optimiser=ConvexRule(
     Flux.Optimise.Adam(get_config(lg, "learning_rate"), (0.9, 0.999), 1.0e-8, IdDict{Any,Any}())
 )
 
-function relative_rmse(ŷ, y)
-    return sqrt(mean(((ŷ .- y) ./ y) .^ 2))
-end
-
-function relative_mae(ŷ, y)
-    return mean(abs.((ŷ .- y) ./ y))
-end
-
 nn = MultitargetNeuralNetworkRegressor(;
     builder=FullyConnectedBuilder(get_config(lg, "layers")),
     rng=get_config(lg, "rng"),
@@ -107,33 +97,18 @@ nn = MultitargetNeuralNetworkRegressor(;
 
 # Constrols
 
-# clear() = begin
-#     global losses = []
-#     global training_losses = []
-#     global epochs = []
-#     return nothing
-# end
-
 function update_loss(loss)
-    # @info "metrics" loss=loss
     Wandb.log(lg, Dict("metrics/loss" => loss))
-    # push!(losses, loss)
     return nothing
 end
 
 function update_training_loss(report)
-    # @info "metrics" training_loss=report.training_losses[end]
     Wandb.log(lg, Dict("metrics/training_loss" => report.training_losses[end]))
-    # push!(training_losses,
-    #     report.training_losses[end]
-    # )
     return nothing
 end
 
 function update_epochs(epoch)
-    # @info "log" epoch=epoch
     Wandb.log(lg, Dict("log/epoch" => epoch))
-    # push!(epochs, epoch)
     return nothing
 end
 
@@ -180,4 +155,4 @@ fit!(mach; verbosity=2)
 close(lg)
 
 # Save model
-MLJ.save(joinpath(model_dir, save_file * ".jls"), mach)
+MLJ.save(joinpath(model_dir, save_file * ".jlso"), mach)

From a1966aaf3faa69994153e94867576e74cef23eab Mon Sep 17 00:00:00 2001
From: Andrew David Werner Rosemberg
Date: Mon, 29 Jan 2024 19:28:30 -0500
Subject: [PATCH 70/74] update

---
 .gitignore   |  1 +
 Project.toml | 21 +++++++++++----------
 src/L2O.jl   |  1 +
 3 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/.gitignore b/.gitignore
index 74c4709..3e5a6cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,3 +16,4 @@ examples/unitcommitment/app/*
 examples/unitcommitment/wandb/*
 *.png
 *.jls
+*.jlso
diff --git a/Project.toml b/Project.toml
index f18dc7d..a928939 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,17 +1,8 @@
-authors = ["andrewrosemberg and contributors"]
 name = "L2O"
 uuid = "e1d8bfa7-c465-446a-84b9-451470f6e76c"
+authors = ["andrewrosemberg and contributors"]
 version = "1.2.0-DEV"
 
-[compat]
-Arrow = "2"
-CSV = "0.10"
-Dualization = "0.5"
-JuMP = "1"
-ParametricOptInterface = "0.7"
-Zygote = "^0.6.68"
-julia = "1.6"
-
 [deps]
 Arrow = "69666777-d1a9-59fb-9406-91d4454c9d45"
 CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
@@ -25,9 +16,19 @@ Nonconvex = "01bcebdf-4d21-426d-b5c4-6132c1619978"
 Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2"
 ParametricOptInterface = "0ce4ce61-57bf-432b-a095-efac525d185e"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
+[compat]
+Arrow = "2"
+CSV = "0.10"
+Dualization = "0.5"
+JuMP = "1"
+ParametricOptInterface = "0.7"
+Zygote = "^0.6.68"
+julia = "1.6"
+
 [extras]
 AbstractGPs = "99985d1d-32ba-4be9-9821-2ec096f28918"
 CUDA_Runtime_jll = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
diff --git a/src/L2O.jl b/src/L2O.jl
index 4f5e58c..7396ca9 100644
--- a/src/L2O.jl
+++ b/src/L2O.jl
@@ -8,6 +8,7 @@ using UUIDs
 import ParametricOptInterface as POI
 import JuMP.MOI as MOI
 import Base: string
+using Statistics
 
 using Nonconvex
 using Zygote

From a1198b84375b7a9c445c8e7225e3e412751d5033 Mon Sep 17 00:00:00 2001
From: Andrew David Werner Rosemberg
Date: Mon, 29 Jan 2024 21:05:45 -0500
Subject: [PATCH 71/74] update

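The Wandb callbacks move into a shared training_utils.jl, and model
persistence switches from serializing the whole MLJ machine to saving only
the network weights: the fitted Flux model is pulled out of the machine,
its state captured with Flux.state, and written to a JLD2 archive. A
minimal sketch of that save/restore round trip on a plain Flux model (the
file name is illustrative):

    using Flux, JLD2

    model = Chain(Dense(4 => 8, relu), Dense(8 => 1))
    jldsave("model.jld2"; model_state=Flux.state(model))  # weights only

    model2 = Chain(Dense(4 => 8, relu), Dense(8 => 1))    # same architecture
    Flux.loadmodel!(model2, JLD2.load("model.jld2", "model_state"))
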
---
 .gitignore                                |  1 +
 examples/unitcommitment/training_utils.jl | 15 ++++++
 examples/unitcommitment/uc_nn_training.jl | 28 +++++-----
 examples/unitcommitment/unitcommitment.jl | 63 +++++++++++++++++++++--
 4 files changed, 88 insertions(+), 19 deletions(-)
 create mode 100644 examples/unitcommitment/training_utils.jl

diff --git a/.gitignore b/.gitignore
index 3e5a6cc..7f361e7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,3 +17,4 @@ examples/unitcommitment/wandb/*
 *.png
 *.jls
 *.jlso
+*.jld2
diff --git a/examples/unitcommitment/training_utils.jl b/examples/unitcommitment/training_utils.jl
new file mode 100644
index 0000000..a69b195
--- /dev/null
+++ b/examples/unitcommitment/training_utils.jl
@@ -0,0 +1,15 @@
+
+function update_loss(loss)
+    Wandb.log(lg, Dict("metrics/loss" => loss))
+    return nothing
+end
+
+function update_training_loss(report)
+    Wandb.log(lg, Dict("metrics/training_loss" => report.training_losses[end]))
+    return nothing
+end
+
+function update_epochs(epoch)
+    Wandb.log(lg, Dict("log/epoch" => epoch))
+    return nothing
+end
\ No newline at end of file
diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl
index 01065e8..6da917b 100644
--- a/examples/unitcommitment/uc_nn_training.jl
+++ b/examples/unitcommitment/uc_nn_training.jl
@@ -20,6 +20,7 @@ using Wandb, Dates, Logging
 using Statistics
 
 include(joinpath(dirname(@__FILE__), "bnb_dataset.jl"))
+include(joinpath(dirname(@__FILE__), "training_utils.jl"))
 
 include(joinpath(dirname(dirname(@__DIR__)), "src/cutting_planes.jl"))
 
@@ -97,21 +98,6 @@ nn = MultitargetNeuralNetworkRegressor(;
 
 # Constrols
 
-function update_loss(loss)
-    Wandb.log(lg, Dict("metrics/loss" => loss))
-    return nothing
-end
-
-function update_training_loss(report)
-    Wandb.log(lg, Dict("metrics/training_loss" => report.training_losses[end]))
-    return nothing
-end
-
-function update_epochs(epoch)
-    Wandb.log(lg, Dict("log/epoch" => epoch))
-    return nothing
-end
-
 model_dir = joinpath(dirname(@__FILE__), "models")
 mkpath(model_dir)
 
@@ -155,4 +141,14 @@ fit!(mach; verbosity=2)
 close(lg)
 
 # Save model
-MLJ.save(joinpath(model_dir, save_file * ".jlso"), mach)
+# MLJ.save(joinpath(model_dir, save_file * ".jlso"), mach)
+
+using JLD2
+
+mach = mach |> cpu
+
+fitted_model = mach.fitresult.fitresult[1]
+
+model_state = Flux.state(fitted_model)
+
+jldsave(joinpath(model_dir, save_file * ".jld2"); model_state)
diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl
index 4b500e7..2116853 100644
--- a/examples/unitcommitment/unitcommitment.jl
+++ b/examples/unitcommitment/unitcommitment.jl
@@ -6,10 +6,16 @@
 # Load Functions
 ##############
 
-using MLJFlux
+# using MLJFlux
 using Flux
-using MLJ
+# using MLJ
 using L2O
+# using CUDA
+# using Wandb
+using DataFrames
+
+# include(joinpath(pwd(), "examples", "unitcommitment", "training_utils.jl"))
+include(joinpath(pwd(), "examples", "unitcommitment", "bnb_dataset.jl"))
 
 ##############
 # Parameters
@@ -20,12 +26,63 @@ date = "2017-01-01"
 horizon = 2
 save_file = case_name * "_" * replace(date, "-" => "_") * "_h" * string(horizon)
 model_dir = joinpath(pwd(), "examples", "unitcommitment", "models")
+data_file_path = joinpath(pwd(), "examples", "unitcommitment", "data")
+case_file_path = joinpath(data_file_path, case_name, date, "h"*string(horizon))
 
 ##############
 # Load Model
 ##############
-mach = machine(joinpath(model_dir, save_file * ".jls"))
+# mach = machine(joinpath(model_dir, save_file * ".jlso"))
+
+model_state = JLD2.load(joinpath(model_dir, save_file * ".jld2"), "model_state")
+
+##############
+# Load Data
+##############
+
+file_in = readdir(joinpath(case_file_path, "input"), join=true)[1]
+batch_id = split(split(split(file_in, "/")[end], "_input_")[2], ".")[1]
+file_outs = readdir(joinpath(case_file_path, "output"), join=true)
+file_out = [file for file in file_outs if occursin(batch_id, file)][1]
+
+input_table = Arrow.Table(file_in) |> DataFrame
+output_table = Arrow.Table(file_out) |> DataFrame
+
+num_input = size(input_table, 2) - 1
+
+train_table = innerjoin(input_table, output_table[!, [:id, :objective]]; on=:id)
+input_features = train_table[!, Not([:id, :objective])]
+X = Float32.(Matrix(input_features))
+y = Float32.(Matrix(train_table[!, [:objective]]))
+
+##############
+# Load Instance
+##############
+instance = UnitCommitment.read_benchmark(
+    joinpath("matpower", case_name, date),
+)
+instance.time = horizon
+
+# impose load
+for i in 1:length(instance.buses)
+    bus = instance.buses[i]
+    bus_name = bus.name
+    for h in 1:horizon
+        bus.load[h] = input_table[1, Symbol("load_" * bus_name * "_" * string(h))]
+    end
+end
+
+model = build_model_uc(instance)
+bin_vars, bin_vars_names = bin_variables_retriever(model)
+load_inputs = [
+
+flux_model = FullyConnected(
+
+# Remove binary constraints
+upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(model)
+
+predict_mode(mach, rand(1428)')
 
 ##############
 # Solve using DNN approximator

From 93b04b90112b3cfe88aacde5aa923687757c290a Mon Sep 17 00:00:00 2001
From: Andrew David Werner Rosemberg
Date: Tue, 30 Jan 2024 10:45:48 -0500
Subject: [PATCH 72/74] update

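Weights alone are not enough to rebuild the proxy: the solve script also
needs the layer sizes and the exact input-column order used in training.
input_features therefore becomes a vector of column names (rather than a
DataFrame slice), X is selected by those names, and the names are persisted
in the JLD2 archive next to the state. A sketch of the intended read side
(key names match the jldsave call below; the file name is illustrative):

    using JLD2

    saved = JLD2.load("case300_2017_01_01_h2.jld2")
    saved["layers"]          # e.g. [1024, 512, 64]
    saved["input_features"]  # column names, in training order
    saved["model_state"]     # Flux.state of the fitted network
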
---
 examples/unitcommitment/uc_nn_training.jl |  6 +++---
 examples/unitcommitment/unitcommitment.jl | 16 +++++++++++-----
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl
index 6da917b..9c76c3c 100644
--- a/examples/unitcommitment/uc_nn_training.jl
+++ b/examples/unitcommitment/uc_nn_training.jl
@@ -63,8 +63,8 @@ output_table = vcat(output_tables...)
 
 # Separate input and output variables & ignore id time status primal_status dual_status
 train_table = innerjoin(input_table, output_table[!, [:id, :objective]]; on=:id)
-input_features = train_table[!, Not([:id, :objective])]
-X = Float32.(Matrix(input_features))
+input_features = names(train_table[!, Not([:id, :objective])])
+X = Float32.(Matrix(train_table[!, input_features]))
 y = Float32.(Matrix(train_table[!, [:objective]]))
 
 # Define model and logger
@@ -151,4 +152,4 @@ fitted_model = mach.fitresult.fitresult[1]
 
 model_state = Flux.state(fitted_model)
 
-jldsave(joinpath(model_dir, save_file * ".jld2"); model_state)
+jldsave(joinpath(model_dir, save_file * ".jld2"); model_state=model_state, layers=get_config(lg, "layers"), input_features=input_features)

From de35011c358a62c90b9d7a7f879bc2441f3901e8 Mon Sep 17 00:00:00 2001
From: Andrew David Werner Rosemberg
Date: Tue, 30 Jan 2024 14:03:36 -0500
Subject: [PATCH 73/74] update code

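The solve script now rebuilds the network from the saved layers and state
and embeds it in the relaxed JuMP model: the inputs are a mix of fixed load
values and upper-model binary variables, so evaluating the network
symbolically yields affine expressions, with each ReLU handled by the
NNlib.relu overload for AffExpr (from nn_expression.jl). A minimal sketch
of one layer of that encoding under an epigraph formulation — the helper
relu_epigraph is hypothetical and only illustrates what such an overload
adds to the model; the repository's own overload may differ in details,
and the encoding is a relaxation (relu_out >= max(ex, 0)) that is tight
only where the optimization pushes the output down:

    using JuMP

    function relu_epigraph(model::Model, ex::AffExpr)
        relu_out = @variable(model)
        @constraint(model, relu_out >= ex)   # epigraph of the identity part
        @constraint(model, relu_out >= 0)    # epigraph of the zero part
        return relu_out
    end

    m = Model()
    @variable(m, 0 <= u[1:2] <= 1)
    W, b = [1.0 -1.0; 0.5 2.0], [0.0, -1.0]           # a tiny fixed layer
    h = [relu_epigraph(m, ex) for ex in W * u .+ b]   # layer output variables
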
---
 examples/unitcommitment/uc_nn_training.jl |  7 +--
 examples/unitcommitment/unitcommitment.jl | 54 ++++++++++++++--------
 2 files changed, 40 insertions(+), 21 deletions(-)

diff --git a/examples/unitcommitment/uc_nn_training.jl b/examples/unitcommitment/uc_nn_training.jl
index 9c76c3c..9c5f39c 100644
--- a/examples/unitcommitment/uc_nn_training.jl
+++ b/examples/unitcommitment/uc_nn_training.jl
@@ -68,11 +68,12 @@ y = Float32.(Matrix(train_table[!, [:objective]]))
 
 # Define model and logger
+layers = [1024, 512, 64] # [1024, 300, 64, 32] , [1024, 1024, 300, 64, 32]
 lg = WandbLogger(
     project = "unit_commitment_proxies",
     name = "$(case_name)-$(date)-h$(horizon)-$(now())",
     config = Dict(
-        "layers" => [1024, 512, 64], # [1024, 300, 64, 32] , [1024, 1024, 300, 64, 32]
+        "layers" => layers,
         "batch_size" => 32,
         "optimiser" => "ConvexRule",
         "learning_rate" => 0.01,
@@ -86,7 +87,7 @@ optimiser=ConvexRule(
 )
 
 nn = MultitargetNeuralNetworkRegressor(;
-    builder=FullyConnectedBuilder(get_config(lg, "layers")),
+    builder=FullyConnectedBuilder(layers),
     rng=get_config(lg, "rng"),
     epochs=5000,
     optimiser=optimiser,
@@ -151,4 +152,4 @@ fitted_model = mach.fitresult.fitresult[1]
 
 model_state = Flux.state(fitted_model)
 
-jldsave(joinpath(model_dir, save_file * ".jld2"); model_state=model_state, layers=get_config(lg, "layers"), input_features=input_features)
+jldsave(joinpath(model_dir, save_file * ".jld2"); model_state=model_state, layers=layers, input_features=input_features)
diff --git a/examples/unitcommitment/unitcommitment.jl b/examples/unitcommitment/unitcommitment.jl
index 332c9f4..ffcba45 100644
--- a/examples/unitcommitment/unitcommitment.jl
+++ b/examples/unitcommitment/unitcommitment.jl
@@ -9,14 +9,15 @@
 # using MLJFlux
 using Flux
 # using MLJ
-using L2O
+# using L2O
 # using CUDA
 # using Wandb
 using DataFrames
 using JLD2
 using Arrow
+using Gurobi
 
-include(joinpath(pwd(), "examples", "unitcommitment", "training_utils.jl"))
+include(joinpath(pwd(), "src/cutting_planes.jl"))
 include(joinpath(pwd(), "examples", "unitcommitment", "bnb_dataset.jl"))
 
 ##############
@@ -31,12 +32,20 @@ model_dir = joinpath(pwd(), "examples", "unitcommitment", "models")
 data_file_path = joinpath(pwd(), "examples", "unitcommitment", "data")
 case_file_path = joinpath(data_file_path, case_name, date, "h"*string(horizon))
 
+upper_solver = Gurobi.Optimizer
+
 ##############
 # Load Model
 ##############
 # mach = machine(joinpath(model_dir, save_file * ".jlso"))
 
-model_state = JLD2.load(joinpath(model_dir, save_file * ".jld2"), "model_state")
+model_save = JLD2.load(joinpath(model_dir, save_file * ".jld2"))
+
+model_state = model_save["model_state"]
+
+input_features = model_save["input_features"]
+
+layers = model_save["layers"]
 
 ##############
 # Load Data
@@ -50,13 +59,12 @@ file_out = [file for file in file_outs if occursin(batch_id, file)][1]
 input_table = Arrow.Table(file_in) |> DataFrame
 output_table = Arrow.Table(file_out) |> DataFrame
 
-num_input = size(input_table, 2) - 1
-
 train_table = innerjoin(input_table, output_table[!, [:id, :objective]]; on=:id)
-input_features = train_table[!, Not([:id, :objective])]
-X = Float32.(Matrix(input_features))
+X = Float32.(Matrix(train_table[!, Symbol.(input_features)]))
 y = Float32.(Matrix(train_table[!, [:objective]]))
 
+true_ob_value = output_table[output_table.status .== "OPTIMAL", :objective][1]
+
 ##############
 # Load Instance
 ##############
@@ -78,18 +86,18 @@ end
 model = build_model_uc(instance)
 bin_vars, bin_vars_names = bin_variables_retriever(model)
 
+# Remove binary constraints
+upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(model)
+
 ##############
 # Build DNN
 ##############
 
-input_size = size(input_table, 2) - 1
-flux_model = FullyConnected(input_size, [1024, 512, 64], 1)
+input_size = length(input_features)
+flux_model = Chain(FullyConnected(input_size, layers, 1))
+Flux.loadmodel!(flux_model, model_state)
 
 Float64(relative_mae(flux_model(X'), y'))
-
-# Remove binary constraints
-upper_model, inner_2_upper_map, cons_mapping = copy_binary_model(model)
-
 ##############
 # Solve using DNN approximator
 ##############
@@ -102,8 +110,18 @@ function NNlib.relu(ex::AffExpr)
 end
 
 # add nn to model
-bin_vars_upper = [inner_2_upper_map[var] for var in bin_vars]
-obj = mach.fitresult.fitresult[1](bin_vars_upper)
+bin_vars_upper = Array{Any}(undef, length(input_features))
+for i in 1:length(input_features)
+    feature = input_features[i]
+    if occursin("load", feature)
+        bin_vars_upper[i] = input_table[1, Symbol(feature)]
+    else
+        idx = findfirst(isequal(feature), bin_vars_names)
+        bin_vars_upper[i] = inner_2_upper_map[bin_vars[idx]]
+    end
+end
+
+obj = flux_model(bin_vars_upper)
 
 aux = @variable(upper_model)
 @constraint(upper_model, obj[1] <= aux)
@@ -121,12 +139,12 @@ objective_value(upper_model)
 ##############
 # Compare
 ##############
 # Compare using approximator
-mach.fitresult.fitresult[1](sol_bin_vars .* 1.0)
+# mach.fitresult.fitresult[1](sol_bin_vars .* 1.0)
 abs(value(obj[1]) - true_ob_value) / true_ob_value
 
 # Compare using foward pass
-for i in 1:length(bin_vars_upper)
-    fix(bin_vars[i], sol_bin_vars[i])
+for var in bin_vars
+    fix(var, value(inner_2_upper_map[var]))
 end
 JuMP.optimize!(model)

From bde9270e0349ea33a8229cdf823072995f46d86c Mon Sep 17 00:00:00 2001
From: Andrew Rosemberg
Date: Tue, 30 Jan 2024 15:23:41 -0500
Subject: [PATCH 74/74] update vis

---
 examples/unitcommitment/visualize.jl | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/examples/unitcommitment/visualize.jl b/examples/unitcommitment/visualize.jl
index 9d75bb0..8984238 100644
--- a/examples/unitcommitment/visualize.jl
+++ b/examples/unitcommitment/visualize.jl
@@ -1,6 +1,10 @@
 using Plots
 using Arrow
 using DataFrames
+using Statistics
+using LinearAlgebra
+
+cossim(x,y) = dot(x,y) / (norm(x)*norm(y))
 
 # Data Parameters
 case_name = "case300"
@@ -11,15 +15,19 @@ case_file_path = joinpath(path_dataset, case_name, date, "h"*horizon)
 
 # Load input and output data tables
 file_ins = readdir(joinpath(case_file_path, "input"), join=true)
+batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins]
 file_outs = readdir(joinpath(case_file_path, "output"), join=true)
+file_outs = [file_outs[findfirst(x -> occursin(batch_id, x), file_outs)] for batch_id in batch_ids]
 
 # Load input and output data tables
 input_tables = Array{DataFrame}(undef, length(file_ins))
 output_tables = Array{DataFrame}(undef, length(file_outs))
-for (i, file) in enumerate(file_ins)
+for i in 1:length(file_ins)
+    file = file_ins[i]
     input_tables[i] = Arrow.Table(file) |> DataFrame
 end
-for (i, file) in enumerate(file_outs)
+for i in 1:length(file_outs)
+    file = file_outs[i]
     output_tables[i] = Arrow.Table(file) |> DataFrame
     # if all the status are OPTIMAL, make them INFEASIBLE
     if all(output_tables[i].status .== "OPTIMAL")
@@ -27,13 +35,24 @@ for (i, file) in enumerate(file_outs)
     end
 end
 
+# Nominal Loads
+load_columns = [col for col in names(input_tables[1]) if occursin("load", col)]
+nominal_loads = Vector(input_tables[1][1, load_columns])
+
+# Load divergence
+cos_sim = [acos(cossim(nominal_loads, Vector(input_table[1, load_columns]))) for input_table in input_tables]
+norm_sim = [norm(nominal_loads) - norm(Vector(input_table[1, load_columns])) for input_table in input_tables] ./ norm(nominal_loads) .+ 1
+
+# Polar Plot divergence
+plotly()
+plt = plot(cos_sim, norm_sim, title="Load Similarity", proj = :polar, m = 2)
+
 # concatenate all the input and output tables
 input_table = vcat(input_tables...)
 output_table = vcat(output_tables...)
 
 # sum each row of the input table (excluding the id) and store it in a new column
 commit_columns = [col for col in names(input_table) if !occursin("load", col) && !occursin("id", col)]
-load_columns = [col for col in names(input_table) if occursin("load", col)]
 input_table.total_commitment = sum(eachcol(input_table[!, commit_columns]))
 input_table.total_load = sum(eachcol(input_table[!, load_columns]))