Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
78aa871
working model for feasible limits of load
andrewrosemberg Sep 18, 2023
c30cb31
update test
andrewrosemberg Sep 18, 2023
33059d6
update code
andrewrosemberg Sep 18, 2023
85bf5ee
add worstcaseiterator
andrewrosemberg Sep 18, 2023
4ea4a9b
update
andrewrosemberg Sep 18, 2023
94343c3
fix
andrewrosemberg Sep 18, 2023
3a8152a
add tests
andrewrosemberg Sep 18, 2023
bb772ce
tests passing
andrewrosemberg Sep 18, 2023
43e4839
fix test
andrewrosemberg Sep 18, 2023
ec0a4c5
tests passing
andrewrosemberg Sep 18, 2023
664a202
update code
andrewrosemberg Sep 19, 2023
a494f6b
update script
andrewrosemberg Sep 19, 2023
edb522f
update generator
andrewrosemberg Sep 19, 2023
7b10f84
update code
andrewrosemberg Sep 19, 2023
b07d392
tests passing bayes
andrewrosemberg Sep 19, 2023
d9bd22f
add pglib_bayes
andrewrosemberg Sep 19, 2023
be9efe6
update dataset generation
andrewrosemberg Sep 19, 2023
ee1c4c0
working
andrewrosemberg Sep 19, 2023
3649c01
end
andrewrosemberg Sep 19, 2023
69a68f1
update code
andrewrosemberg Sep 19, 2023
d875f53
update code
andrewrosemberg Sep 20, 2023
dbef0b0
update names
andrewrosemberg Sep 22, 2023
1a41033
update code
andrewrosemberg Sep 22, 2023
5853711
update
andrewrosemberg Sep 22, 2023
f68c493
update
andrewrosemberg Sep 22, 2023
f3b34f1
update code
andrewrosemberg Sep 23, 2023
760247e
update
andrewrosemberg Sep 23, 2023
9b8adf7
update
andrewrosemberg Sep 23, 2023
f920399
update code
andrewrosemberg Sep 23, 2023
8f11630
update tests
andrewrosemberg Sep 23, 2023
9539e2c
update tests
andrewrosemberg Sep 23, 2023
10af4f8
rm legacy
andrewrosemberg Sep 23, 2023
85d86f4
update code
andrewrosemberg Sep 23, 2023
337efe9
add toml example
andrewrosemberg Sep 23, 2023
d9d0889
add line example
andrewrosemberg Sep 23, 2023
2cac65b
update code
Sep 23, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 12 additions & 4 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,31 +1,39 @@
name = "L2O"
uuid = "e1d8bfa7-c465-446a-84b9-451470f6e76c"
authors = ["andrewrosemberg <andrewrosemberg@gmail.com> and contributors"]
version = "1.0.0-DEV"
version = "1.2.0-DEV"

[deps]
Arrow = "69666777-d1a9-59fb-9406-91d4454c9d45"
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
Dualization = "191a621a-6537-11e9-281d-650236a99e60"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Nonconvex = "01bcebdf-4d21-426d-b5c4-6132c1619978"
ParametricOptInterface = "0ce4ce61-57bf-432b-a095-efac525d185e"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
Arrow = "2"
CSV = "0.10"
Dualization = "0.5"
JuMP = "1"
ParametricOptInterface = "0.5"
julia = "1.6"

[extras]
AbstractGPs = "99985d1d-32ba-4be9-9821-2ec096f28918"
Clarabel = "61c947e1-3e6d-4ee4-985a-eec8c727bd6e"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"
Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
NonconvexNLopt = "b43a31b8-ff9b-442d-8e31-c163daa8ab75"
PGLib = "07a8691f-3d11-4330-951b-3c50f98338be"
PowerModels = "c36e90e8-916a-50a6-bd94-075b64ef4655"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Clarabel = "61c947e1-3e6d-4ee4-985a-eec8c727bd6e"

[targets]
test = ["Test", "DelimitedFiles", "Downloads", "HiGHS", "PowerModels", "Flux", "DataFrames", "Clarabel"]
test = ["Test", "DelimitedFiles", "PGLib", "HiGHS", "PowerModels", "Flux", "DataFrames", "Clarabel", "Ipopt", "NonconvexNLopt"]
13 changes: 9 additions & 4 deletions examples/flux/flux_forecaster_script.jl
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,18 @@ using Arrow
using Flux
using DataFrames
using PowerModels
using L2O

# Paths
path_dataset = joinpath(pwd(), "examples", "powermodels", "data")
case_name = "pglib_opf_case5_pjm"
case_name = "pglib_opf_case300_ieee" # pglib_opf_case300_ieee # pglib_opf_case5_pjm
network_formulation = SOCWRConicPowerModel # SOCWRConicPowerModel # DCPPowerModel
filetype = ArrowFile
network_formulation = DCPPowerModel
case_file_path = joinpath(path, case_name, string(network_formulation))
path_dataset = joinpath(pwd(), "examples", "powermodels", "data")
case_file_path = joinpath(path_dataset, case_name, string(network_formulation))

# Load input and output data tables
iter_files = readdir(joinpath(case_file_path))
iter_files = filter(x -> occursin(string(ArrowFile), x), iter_files)
file_ins = [joinpath(case_file_path, file) for file in iter_files if occursin("input", file)]
file_outs = [joinpath(case_file_path, file) for file in iter_files if occursin("output", file)]
batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins]
Expand All @@ -40,6 +42,9 @@ output_data_test = DataFrame(output_table_test)
output_variables_train = output_data_train[!, Not(:id)]
input_features_train = innerjoin(input_data_train, output_data_train[!, [:id]], on = :id)[!, Not(:id)] # just use success solves

num_loads = floor(Int,size(input_features_train,2)/2)
total_volume=[sum(sqrt(input_features_train[i,l]^2 + input_features_train[i,l+num_loads]^2) for l in 1:num_loads) for i in 1:size(input_features_train,1) ]

output_variables_test = output_data_test[!, Not(:id)]
input_features_test = innerjoin(input_data_test, output_data_test[!, [:id]], on = :id)[!, Not(:id)] # just use success solves

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Name of the reference PGLib case. Must be a valid PGLib case name:
# the generation script resolves it via pglib(case_name * ".m").
case_name = "pglib_opf_case300_ieee"

# Directory where instance/solution files are exported.
# Must be an existing, writable directory path (placeholder below).
export_dir = "EXPORT_DIR"

# Uncomment to enable random-sampler dataset generation.
# [sampler]
# num_batches = 1
# num_samples = 10

# Line-search dataset generation (enabled). `num_samples` is the number of
# samples generated per batch; the script runs one batch per load plus one.
[line_search]
num_samples = 10

# Uncomment to enable worst-case dataset generation via the dual problem.
# [worst_case_dual]
# num_samples = 10

# Uncomment to enable worst-case dataset generation via nonconvex optimization.
# [worst_case_nonconvex]
# num_samples = 10
125 changes: 103 additions & 22 deletions examples/powermodels/generate_full_datasets_script.jl
Original file line number Diff line number Diff line change
@@ -1,46 +1,127 @@
# run with: julia ./examples/powermodels/generate_full_datasets_script.jl "./examples/powermodels/data/pglib_opf_case300_ieee/case300.config.toml"
config_path = ARGS[1]

import Pkg; Pkg.activate(".")

using TestEnv
TestEnv.activate()

using Arrow
########## SCRIPT REQUIRED PACKAGES ##########

using L2O
using Arrow
using Test
using UUIDs
using PowerModels
using Clarabel
import JuMP.MOI as MOI
import ParametricOptInterface as POI
using TOML

PowerModels.silence()

## SOLVER PACKAGES ##

cached = MOI.Bridges.full_bridge_optimizer(
using Clarabel
using Gurobi
using NonconvexNLopt

########## POI SOLVER ##########

cached = () -> MOI.Bridges.full_bridge_optimizer(
MOI.Utilities.CachingOptimizer(
MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
Clarabel.Optimizer(),
),
Float64,
)

# Paths
path_powermodels = joinpath(pwd(), "examples", "powermodels")
path = joinpath(path_powermodels, "data")
POI_cached_optimizer() = POI.Optimizer(cached())

########## PARAMETERS ##########

config = TOML.parsefile(config_path)
path = config["export_dir"]

path_powermodels = joinpath(dirname(@__FILE__)) # TODO: Make it a submodule
include(joinpath(path_powermodels, "pglib_datagen.jl"))

# Parameters
num_batches = 1
num_p = 10
filetype = ArrowFile

# Case name
case_name = "pglib_opf_case300_ieee"
network_formulation = SOCWRConicPowerModel
case_name = config["case_name"]
case_file_path = joinpath(path, case_name)
solver = () -> POI.Optimizer(cached())

# Generate dataset
success_solves = 0.0
for i in 1:num_batches
_success_solves, number_variables, number_loads, batch_id = generate_dataset_pglib(case_file_path, case_name;
num_p=num_p, filetype=filetype, network_formulation=network_formulation, solver=solver,
load_sampler= (_o, n) -> load_sampler(_o, n, max_multiplier=1.25, min_multiplier=0.8, step_multiplier=0.01)
mkpath(case_file_path)
network_formulation= eval(Symbol(ARGS[2]))

########## SAMPLER DATASET GENERATION ##########

if haskey(config, "sampler")
    num_batches = config["sampler"]["num_batches"]
    num_p = config["sampler"]["num_samples"]

    # Accumulate the per-batch success counts reported by the generator,
    # then average over batches for the final statistic.
    global success_solves = 0.0
    for _batch in 1:num_batches
        batch_success, _num_vars, _num_loads, _batch_id = generate_dataset_pglib(
            case_file_path,
            case_name;
            num_p=num_p,
            filetype=filetype,
            network_formulation=network_formulation,
            optimizer=POI_cached_optimizer,
            # Random load scaling in [0.8, 1.25] with a 0.01 step granularity.
            internal_load_sampler=(_o, n) ->
                load_sampler(_o, n, max_multiplier=1.25, min_multiplier=0.8, step_multiplier=0.01),
        )
        global success_solves += batch_success
    end
    success_solves /= num_batches

    @info "Success solves Normal: $(success_solves)"
end

########## LINE SEARCH DATASET GENERATION ##########

if haskey(config, "line_search")
    # One batch per load plus one extra; all batches share a single batch id
    # so the exported files belong to the same line-search run.
    network_data = make_basic_network(pglib(case_name * ".m"))
    step_multiplier = 1.01
    num_loads = length(network_data["load"])
    num_batches = num_loads + 1
    num_p = config["line_search"]["num_samples"]

    # Abort a batch as soon as a solve reports an unsuccessful status.
    early_stop_fn = (model, status, recorder) -> !status

    global success_solves = 0.0
    global batch_id = string(uuid1())
    for load_idx in 1:num_batches
        batch_success, _num_vars, _num_loads, _b_id = generate_dataset_pglib(
            case_file_path,
            case_name;
            num_p=num_p,
            filetype=filetype,
            network_formulation=network_formulation,
            optimizer=POI_cached_optimizer,
            # The sampler walks load `load_idx` along a line, growing it by
            # `step_multiplier` each sample.
            internal_load_sampler=(_o, n, idx, num_inputs) ->
                line_sampler(_o, n, idx, num_inputs, load_idx; step_multiplier=step_multiplier),
            early_stop_fn=early_stop_fn,
            batch_id=batch_id,
        )
        global success_solves += batch_success
    end
    success_solves /= num_batches

    @info "Success solves: $(success_solves * 100) % of $(num_batches * num_p)"
end

########## WORST CASE DUAL DATASET GENERATION ##########
if haskey(config, "worst_case_dual")
    num_p = config["worst_case_dual"]["num_samples"]

    # Build the solver used for the worst-case dual problem.
    # NOTE(review): the returned closure always hands back the single Gurobi
    # instance created when the factory is called — confirm the dataset
    # generator needs only one optimizer per factory invocation.
    function optimizer_factory()
        IPO_OPT = Gurobi.Optimizer()
        # IPO_OPT = MadNLP.Optimizer(print_level=MadNLP.INFO, max_iter=100)
        # IPO = MOI.Bridges.Constraint.SOCtoNonConvexQuad{Float64}(IPO_OPT)
        # MIP = QuadraticToBinary.Optimizer{Float64}(IPO)
        return () -> IPO_OPT
    end

    # `hook` enables Gurobi's nonconvex quadratic support on each model.
    success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset(
        case_file_path,
        case_name;
        num_p=num_p,
        filetype=filetype,
        network_formulation=network_formulation,
        optimizer_factory=optimizer_factory,
        hook=(model) -> set_optimizer_attribute(model, "NonConvex", 2),
    )
    # Bug fix: removed `success_solves += _success_solves` — `_success_solves`
    # is never defined in this block (UndefVarError at runtime), and
    # `success_solves` is already assigned by the call above.

    @info "Success solves Worst Case: $(success_solves) of $(num_p)"
end

########## WORST CASE NONCONVEX DATASET GENERATION ##########
if haskey(config, "worst_case_nonconvex")
    num_p = config["worst_case_nonconvex"]["num_samples"]

    success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset_Nonconvex(
        case_file_path,
        case_name;
        num_p=num_p,
        filetype=filetype,
        network_formulation=network_formulation,
        optimizer=POI_cached_optimizer,
    )

    @info "Success solves Worst Case: $(success_solves * 100) of $(num_p)"
end
# Bug fix: removed the stray trailing `success_solves /= num_batches` left over
# from an earlier refactor. It sat outside every config-guarded section, so
# `num_batches` is unbound unless the sampler or line-search sections ran
# (UndefVarError), and it silently corrupted the just-reported statistic
# whenever they had.
Loading