7 changes: 5 additions & 2 deletions Project.toml
@@ -7,10 +7,13 @@ version = "1.2.0-DEV"
Arrow = "69666777-d1a9-59fb-9406-91d4454c9d45"
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
Dualization = "191a621a-6537-11e9-281d-650236a99e60"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MLJFlux = "094fc8d1-fd35-5302-93ea-dabda2abf845"
Nonconvex = "01bcebdf-4d21-426d-b5c4-6132c1619978"
ParametricOptInterface = "0ce4ce61-57bf-432b-a095-efac525d185e"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

@@ -27,13 +30,13 @@ AbstractGPs = "99985d1d-32ba-4be9-9821-2ec096f28918"
Clarabel = "61c947e1-3e6d-4ee4-985a-eec8c727bd6e"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"
Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
NonconvexNLopt = "b43a31b8-ff9b-442d-8e31-c163daa8ab75"
PGLib = "07a8691f-3d11-4330-951b-3c50f98338be"
PowerModels = "c36e90e8-916a-50a6-bd94-075b64ef4655"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
MLJ = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7"

[targets]
test = ["Test", "DelimitedFiles", "PGLib", "HiGHS", "PowerModels", "Flux", "DataFrames", "Clarabel", "Ipopt", "NonconvexNLopt"]
test = ["Test", "DelimitedFiles", "PGLib", "HiGHS", "PowerModels", "DataFrames", "Clarabel", "Ipopt", "NonconvexNLopt", "MLJ"]
85 changes: 48 additions & 37 deletions examples/flux/flux_forecaster_script.jl
@@ -2,6 +2,7 @@ using TestEnv
TestEnv.activate()

using Arrow
using CSV
using Flux
using DataFrames
using PowerModels
@@ -10,26 +11,38 @@ using L2O
# Paths
case_name = "pglib_opf_case300_ieee" # pglib_opf_case300_ieee # pglib_opf_case5_pjm
network_formulation = SOCWRConicPowerModel # SOCWRConicPowerModel # DCPPowerModel
filetype = ArrowFile
filetype = ArrowFile # ArrowFile # CSVFile
path_dataset = joinpath(pwd(), "examples", "powermodels", "data")
case_file_path = joinpath(path_dataset, case_name, string(network_formulation))

# Load input and output data tables
iter_files = readdir(joinpath(case_file_path))
iter_files = filter(x -> occursin(string(ArrowFile), x), iter_files)
file_ins = [joinpath(case_file_path, file) for file in iter_files if occursin("input", file)]
file_outs = [joinpath(case_file_path, file) for file in iter_files if occursin("output", file)]
iter_files = filter(x -> occursin(string(filetype), x), iter_files)
file_ins = [
joinpath(case_file_path, file) for file in iter_files if occursin("input", file)
]
file_outs = [
joinpath(case_file_path, file) for file in iter_files if occursin("output", file)
]
batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins]

# Load input and output data tables
train_idx = [1]
test_idx = [2]
train_idx = collect(1:floor(Int, length(file_ins) * 0.5))
test_idx = setdiff(1:length(file_ins), train_idx)

input_table_train = Arrow.Table(file_ins[train_idx])
output_table_train = Arrow.Table(file_outs[train_idx])
if filetype === ArrowFile
input_table_train = Arrow.Table(file_ins[train_idx])
output_table_train = Arrow.Table(file_outs[train_idx])

input_table_test = Arrow.Table(file_ins[test_idx])
output_table_test = Arrow.Table(file_outs[test_idx])
input_table_test = Arrow.Table(file_ins[test_idx])
output_table_test = Arrow.Table(file_outs[test_idx])
else
input_table_train = CSV.read(file_ins[train_idx], DataFrame)
output_table_train = CSV.read(file_outs[train_idx], DataFrame)

input_table_test = CSV.read(file_ins[test_idx], DataFrame)
output_table_test = CSV.read(file_outs[test_idx], DataFrame)
end

# Convert to dataframes
input_data_train = DataFrame(input_table_train)
@@ -40,41 +53,39 @@ output_data_test = DataFrame(output_table_test)

# Separate input and output variables
output_variables_train = output_data_train[!, Not(:id)]
input_features_train = innerjoin(input_data_train, output_data_train[!, [:id]], on = :id)[!, Not(:id)] # just use success solves

num_loads = floor(Int,size(input_features_train,2)/2)
total_volume=[sum(sqrt(input_features_train[i,l]^2 + input_features_train[i,l+num_loads]^2) for l in 1:num_loads) for i in 1:size(input_features_train,1) ]
input_features_train = innerjoin(input_data_train, output_data_train[!, [:id]]; on=:id)[
!, Not(:id)
] # just use success solves

num_loads = floor(Int, size(input_features_train, 2) / 2)
total_volume = [
sum(
sqrt(input_features_train[i, l]^2 + input_features_train[i, l + num_loads]^2) for
l in 1:num_loads
) for i in 1:size(input_features_train, 1)
]

output_variables_test = output_data_test[!, Not(:id)]
input_features_test = innerjoin(input_data_test, output_data_test[!, [:id]], on = :id)[!, Not(:id)] # just use success solves
input_features_test = innerjoin(input_data_test, output_data_test[!, [:id]]; on=:id)[
!, Not(:id)
] # just use success solves

# Define model
model = Chain(
Dense(size(input_features_train, 2), 64, relu),
Dense(64, 32, relu),
Dense(32, size(output_variables_train, 2)),
model = MultitargetNeuralNetworkRegressor(;
builder=FullyConnectedBuilder([64, 32]),
rng=123,
epochs=20,
optimiser=ConvexRule(
Flux.Optimise.Adam(0.001, (0.9, 0.999), 1.0e-8, IdDict{Any,Any}())
),
)

# Define loss function
loss(x, y) = Flux.mse(model(x), y)

# Convert the data to matrices
input_features_train = Matrix(input_features_train)'
output_variables_train = Matrix(output_variables_train)'

input_features_test = Matrix(input_features_test)'
output_variables_test = Matrix(output_variables_test)'

# Define the optimizer
optimizer = Flux.ADAM()

# Train the model
Flux.train!(
loss, Flux.params(model), [(input_features_train, output_variables_train)], optimizer
)
# Define the machine
mach = machine(model, input_features_train, output_variables_train)
fit!(mach; verbosity=2)

# Make predictions
predictions = model(input_features_test)
predictions = predict(mach, input_features_test)

# Calculate the error
error = Flux.mse(Matrix(DataFrame(predictions)), Matrix(output_variables_test))
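
The rewritten script above swaps the hand-rolled Flux training loop for MLJFlux's machine interface. A self-contained sketch of that pattern on synthetic data is given below; it is an illustration only, using MLJFlux's stock MLP builder and a plain Adam optimiser in place of the repository's FullyConnectedBuilder and ConvexRule, and it assumes an MLJFlux version that accepts Flux optimisers (as the script itself does).

using MLJ, MLJFlux, Flux, DataFrames

# Synthetic stand-ins for the Arrow-backed feature and target tables
X = DataFrame(randn(100, 4), :auto)            # 4 continuous input features
y = DataFrame(; y1=randn(100), y2=randn(100))  # 2 continuous targets

model = MLJFlux.MultitargetNeuralNetworkRegressor(;
    builder=MLJFlux.MLP(; hidden=(64, 32), σ=Flux.relu),
    optimiser=Flux.Optimise.Adam(0.001),
    epochs=20,
    rng=123,
)

mach = machine(model, X, y)   # bind model and data
fit!(mach; verbosity=1)       # train for `epochs` passes over the data
ŷ = predict(mach, X)          # table of point predictions, one column per target
Flux.mse(Matrix(DataFrame(ŷ)), Matrix(y))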
45 changes: 0 additions & 45 deletions examples/flux/test_flux_forecaster.jl

This file was deleted.

74 changes: 49 additions & 25 deletions examples/powermodels/generate_full_datasets_script.jl
@@ -1,7 +1,8 @@
# run with: julia ./examples/powermodels/generate_full_datasets_script.jl "./examples/powermodels/data/pglib_opf_case300_ieee/case300.config.toml"
config_path = ARGS[1]

import Pkg; Pkg.activate(".")
using Pkg: Pkg;
Pkg.activate(".");

using TestEnv
TestEnv.activate()
@@ -27,13 +28,14 @@ using NonconvexNLopt

########## POI SOLVER ##########

cached = () -> MOI.Bridges.full_bridge_optimizer(
MOI.Utilities.CachingOptimizer(
MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
Clarabel.Optimizer(),
),
Float64,
)
cached =
() -> MOI.Bridges.full_bridge_optimizer(
MOI.Utilities.CachingOptimizer(
MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
Clarabel.Optimizer(),
),
Float64,
)

POI_cached_optimizer() = POI.Optimizer(cached())

@@ -45,12 +47,12 @@ path = config["export_dir"]
path_powermodels = joinpath(dirname(@__FILE__)) # TODO: Make it a submodule
include(joinpath(path_powermodels, "pglib_datagen.jl"))

filetype = ArrowFile
filetype = ArrowFile # ArrowFile # CSVFile

case_name = config["case_name"]
case_file_path = joinpath(path, case_name)
mkpath(case_file_path)
network_formulation= eval(Symbol(ARGS[2]))
network_formulation = eval(Symbol(ARGS[2])) # SOCWRConicPowerModel # DCPPowerModel

########## SAMPLER DATASET GENERATION ##########

@@ -59,9 +61,16 @@ if haskey(config, "sampler")
num_p = config["sampler"]["num_samples"]
global success_solves = 0.0
for i in 1:num_batches
_success_solves, number_variables, number_loads, batch_id = generate_dataset_pglib(case_file_path, case_name;
num_p=num_p, filetype=filetype, network_formulation=network_formulation, optimizer=POI_cached_optimizer,
internal_load_sampler= (_o, n) -> load_sampler(_o, n, max_multiplier=1.25, min_multiplier=0.8, step_multiplier=0.01)
_success_solves, number_variables, number_loads, batch_id = generate_dataset_pglib(
case_file_path,
case_name;
num_p=num_p,
filetype=filetype,
network_formulation=network_formulation,
optimizer=POI_cached_optimizer,
internal_load_sampler=(_o, n) -> load_sampler(
_o, n; max_multiplier=1.25, min_multiplier=0.8, step_multiplier=0.01
),
)
global success_solves += _success_solves
end
@@ -81,35 +90,45 @@ if haskey(config, "line_search")
early_stop_fn = (model, status, recorder) -> !status

global success_solves = 0.0
global batch_id = string(uuid1())
for ibatc in 1:num_batches
_success_solves, number_variables, number_loads, b_id = generate_dataset_pglib(case_file_path, case_name;
num_p=num_p, filetype=filetype, network_formulation=network_formulation, optimizer=POI_cached_optimizer,
internal_load_sampler= (_o, n, idx, num_inputs) -> line_sampler(_o, n, idx, num_inputs, ibatc; step_multiplier=step_multiplier),
_success_solves, number_variables, number_loads, b_id = generate_dataset_pglib(
case_file_path,
case_name;
num_p=num_p,
filetype=filetype,
network_formulation=network_formulation,
optimizer=POI_cached_optimizer,
internal_load_sampler=(_o, n, idx, num_inputs) -> line_sampler(
_o, n, idx, num_inputs, ibatc; step_multiplier=step_multiplier
),
early_stop_fn=early_stop_fn,
batch_id=batch_id,
)
global success_solves += _success_solves
end
success_solves /= num_batches

@info "Success solves: $(success_solves * 100) % of $(num_batches * num_p)"
end

########## WORST CASE DUAL DATASET GENERATION ##########
if haskey(config, "worst_case_dual")
num_p = config["worst_case_dual"]["num_samples"]
function optimizer_factory()
IPO_OPT = Gurobi.Optimizer()
IPO_OPT = Gurobi.Optimizer()
# IPO_OPT = MadNLP.Optimizer(print_level=MadNLP.INFO, max_iter=100)
# IPO = MOI.Bridges.Constraint.SOCtoNonConvexQuad{Float64}(IPO_OPT)
# MIP = QuadraticToBinary.Optimizer{Float64}(IPO)
return () -> IPO_OPT
end

success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset(case_file_path, case_name;
num_p=num_p, filetype=filetype, network_formulation=network_formulation, optimizer_factory=optimizer_factory,
hook = (model) -> set_optimizer_attribute(model, "NonConvex", 2)
success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset(
case_file_path,
case_name;
num_p=num_p,
filetype=filetype,
network_formulation=network_formulation,
optimizer_factory=optimizer_factory,
hook=(model) -> set_optimizer_attribute(model, "NonConvex", 2),
)

@info "Success solves Worst Case: $(success_solves) of $(num_p)"
@@ -119,8 +138,13 @@ end
if haskey(config, "worst_case_nonconvex")
num_p = config["worst_case_nonconvex"]["num_samples"]

success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset_Nonconvex(case_file_path, case_name;
num_p=num_p, filetype=filetype, network_formulation=network_formulation, optimizer=POI_cached_optimizer,
success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset_Nonconvex(
case_file_path,
case_name;
num_p=num_p,
filetype=filetype,
network_formulation=network_formulation,
optimizer=POI_cached_optimizer,
)

@info "Success solves Worst Case: $(success_solves * 100) of $(num_p)"