
Commit 77471fb

v0.12.0 fix examples (#126)
* fixed new train method layout in examples
* further adjustments
* allow for DiffEq default solver heuristic
* fix action
* revert change
* Patch example action (#128)
* changes for debugging
* test non escape for awk
* changed escapes for debugging
* cleanup
* doc fix for FMIFlux.train! (#127)

Co-authored-by: Simon Exner <43469235+0815Creeper@users.noreply.github.com>
1 parent 177045b commit 77471fb

9 files changed (+29 / -19 lines)

.github/workflows/Example.yml

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ jobs:
   echo "starting gif fixing"
   mv examples/src/gif_*.gif examples/src/${{ matrix.file-name }}_files
   $env:Path += ";C:\Program Files\Git\usr\bin"
-  awk '{if($0~/<img src=\"data:image\/gif;base64,[[:alpha:],[:digit:],\/,+,=]*\" \/>/) {sub(/<img src=\"data:image\/gif;base64,[[:alpha:],[:digit:],\/,+,=]*\" \/>/,\"![gif](${{ matrix.file-name }}_files\/gif_\"++i\".gif)\")}}1' examples/src/${{ matrix.file-name }}.md > examples/src/tmp_${{ matrix.file-name }}.md
+  awk '{if($0~/<img src="data:image\/gif;base64,[[:alpha:],[:digit:],\/,+,=]*" \/>/) {sub(/<img src="data:image\/gif;base64,[[:alpha:],[:digit:],\/,+,=]*" \/>/,"![gif](${{ matrix.file-name }}_files\/gif_"++i".gif)")}}1' examples/src/${{ matrix.file-name }}.md > examples/src/tmp_${{ matrix.file-name }}.md
   mv -Force examples/src/tmp_${{ matrix.file-name }}.md examples/src/${{ matrix.file-name }}.md
   echo "gifs should be fixed"
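The change drops the backslash-escaped quotes inside the single-quoted awk program, which broke when the step ran under PowerShell. For readers who prefer the substitution logic without awk, here is a minimal Julia sketch of the same idea; the helper name and the base64 character class are illustrative, not part of the workflow:

```julia
# Hypothetical helper mirroring the awk one-liner: replace each inline
# base64-encoded GIF <img> tag with a link to an extracted gif file.
function fix_gifs(md::AbstractString, prefix::AbstractString)
    i = 0
    pattern = r"<img src=\"data:image/gif;base64,[A-Za-z0-9/+=]*\" />"
    # the replacement function is called once per match, numbering the gifs
    return replace(md, pattern => _ -> "![gif]($(prefix)_files/gif_$(i += 1).gif)")
end

# usage sketch: fix_gifs(read("example.md", String), "example")
```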

examples/src/growing_horizon_ME.ipynb

Lines changed: 2 additions & 2 deletions
@@ -641,10 +641,10 @@
 "outputs": [],
 "source": [
 "# train\n",
-"paramsNet = FMIFlux.params(neuralFMU)\n",
+"paramsNet = Flux.params(neuralFMU)\n",
 "\n",
 "optim = Adam()\n",
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet)) "
+"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet)) "
 ]
},
{
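Across the examples, the second argument of `FMIFlux.train!` changes from a parameter container to the NeuralFMU itself. A minimal sketch of the updated call, assuming `neuralFMU`, `lossSum` and `callb` are defined as in this notebook:

```julia
using FMIFlux, FMIFlux.Flux   # as used in the examples

# parameters can still be collected for the callback
paramsNet = Flux.params(neuralFMU)

optim = Adam()

# new layout: pass the NeuralFMU itself, not the parameter object
FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1000), optim;
               cb=() -> callb(paramsNet))
```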

examples/src/juliacon_2023.ipynb

Lines changed: 4 additions & 1 deletion
@@ -465,10 +465,13 @@
 " gates, # compute resulting dx from ANN + FMU\n",
 " dx -> cacheRetrieve(1:4, dx)) # stack together: dx[1,2,3,4] from cache + dx[5,6] from gates\n",
 "\n",
+" solver = Tsit5()\n",
+" \n",
 " # new NeuralFMU \n",
 " neuralFMU = ME_NeuralFMU(f, # the FMU used in the NeuralFMU \n",
 " model, # the model we specified above \n",
 " (tStart, tStop), # a default start ad stop time for solving the NeuralFMU\n",
+" solver;\n",
 " saveat=tSave) # the time points to save the solution at\n",
 " neuralFMU.modifiedState = false # speed optimization (NeuralFMU state equals FMU state)\n",
 " \n",
@@ -740,7 +743,7 @@
 " \n",
 " # the actual training\n",
 " FMIFlux.train!(loss, # the loss function for training\n",
-" params, # the parameters to train\n",
+" neuralFMU, # the parameters to train\n",
 " Iterators.repeated((), steps), # an iterator repeating `steps` times\n",
 " optim; # the optimizer to train\n",
 " gradient=:ForwardDiff, # currently, only ForwarDiff leads to good results for multi-event systems\n",
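The other change in this notebook is that the solver is now handed explicitly to the `ME_NeuralFMU` constructor as a positional argument (otherwise the new DiffEq default-solver heuristic applies). A condensed sketch, assuming `f`, `model`, `tStart`, `tStop` and `tSave` exist as in the notebook:

```julia
import FMI.DifferentialEquations: Tsit5

solver = Tsit5()

# the solver goes in front of the keyword arguments
neuralFMU = ME_NeuralFMU(f, model, (tStart, tStop), solver; saveat=tSave)
neuralFMU.modifiedState = false  # speed optimization (NeuralFMU state equals FMU state)
```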

examples/src/juliacon_2023_helpers.jl

Lines changed: 1 addition & 0 deletions
@@ -8,6 +8,7 @@ import FMIFlux: roundToLength
 import FMIZoo: movavg
 
 import FMI: FMU2Solution
+import FMI.DifferentialEquations: Tsit5
 import FMIZoo: VLDM, VLDM_Data
 
 function fmiSingleInstanceMode(fmu::FMU2, mode::Bool)

examples/src/mdpi_2022.ipynb

Lines changed: 3 additions & 2 deletions
@@ -108,6 +108,7 @@
 "using FMIFlux.Flux # Machine Learning in Julia\n",
 "\n",
 "import FMI.DifferentialEquations: Tsit5 # import the Tsit5-solver\n",
+"import FMI: FMU2Solution\n",
 "using JLD2 # data format for saving/loading parameters\n",
 "\n",
 "# plotting\n",
@@ -611,14 +612,14 @@
 "\n",
 "# we use ForwardDiff for gradinet determination, because the FMU throws multiple events per time instant (this is not supported by reverse mode AD)\n",
 "# the chunk_size controls the nuber of forward evaluations of the model (the bigger, the less evaluations)\n",
-"FMIFlux.train!(loss, params, Iterators.repeated((), batchLen), optim; gradient=:ForwardDiff, chunk_size=32, cb=updateScheduler) \n",
+"FMIFlux.train!(loss, neuralFMU, Iterators.repeated((), batchLen), optim; gradient=:ForwardDiff, chunk_size=32, cb=updateScheduler) \n",
 "loss_after = batch_loss(params[1])"
 ]
},
{
 "attachments": {},
 "cell_type": "markdown",
-"metadata": {},
+"metadata": {},
 "source": [
 "The batch loss (\"AVG\" and \"MAX\") is only updated every 5 steps, as defined in the scheduler. Every 25 steps, we plot the current batch element losses. Please note, that we only did around 100 training steps, so training has not converged for now. But we are curious and want to have a look on the intermediate results. \n",
 "\n",

examples/src/modelica_conference_2021.ipynb

Lines changed: 2 additions & 2 deletions
@@ -891,7 +891,7 @@
 "outputs": [],
 "source": [
 "optim = Adam()\n",
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1), optim; cb=()->callb(paramsNet)) "
+"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1), optim; cb=()->callb(paramsNet)) "
 ]
},
{
@@ -950,7 +950,7 @@
 "for run in 1:numRuns\n",
 " @time for epoch in 1:numEpochs\n",
 " @info \"Run: $(run)/$(numRuns) Epoch: $(epoch)/$(numEpochs)\"\n",
-" FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), numIterations), optim; cb=()->callb(paramsNet))\n",
+" FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), numIterations), optim; cb=()->callb(paramsNet))\n",
 " end\n",
 " flush(stderr)\n",
 " flush(stdout)\n",

examples/src/simple_hybrid_CS.ipynb

Lines changed: 1 addition & 1 deletion
@@ -530,7 +530,7 @@
 "paramsNet = FMIFlux.params(csNeuralFMU)\n",
 "\n",
 "optim = Adam()\n",
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 250), optim; cb=()->callb(paramsNet))"
+"FMIFlux.train!(lossSum, csNeuralFMU, Iterators.repeated((), 250), optim; cb=()->callb(paramsNet))"
 ]
},
{

examples/src/simple_hybrid_ME.ipynb

Lines changed: 2 additions & 2 deletions
@@ -500,7 +500,7 @@
 "paramsNet = FMIFlux.params(neuralFMU)\n",
 "\n",
 "optim = Adam()\n",
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 300), optim; cb=()->callb(paramsNet)) "
+"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 300), optim; cb=()->callb(paramsNet)) "
 ]
},
{
@@ -563,7 +563,7 @@
 },
 "outputs": [],
 "source": [
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1200), optim; cb=()->callb(paramsNet)) \n",
+"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1200), optim; cb=()->callb(paramsNet)) \n",
 "# plot results mass.s\n",
 "solutionAfter = neuralFMU(x₀)\n",
 "Plots.plot!(fig, solutionAfter; stateIndices=1:1, values=false, label=\"NeuralFMU (1500 epochs)\", linewidth=2)\n",

src/neural.jl

Lines changed: 13 additions & 8 deletions
@@ -1030,6 +1030,7 @@ function getComponent(nfmu::NeuralFMU)
     return hasCurrentComponent(nfmu.fmu) ? getCurrentComponent(nfmu.fmu) : nothing
 end
 
+# ToDo: Separate this: NeuralFMU creation and solving!
 """
 
 TODO: Signature, Arguments and Keyword-Arguments descriptions.
@@ -1287,7 +1288,11 @@ function (nfmu::ME_NeuralFMU)(x_start::Union{Array{<:Real}, Nothing} = nfmu.x0,
     prob = ODEProblem{true}(ff, nfmu.x0, nfmu.tspan, p)
 
     if isnothing(sensealg)
-        if isimplicit(solver)
+        if !isnothing(solver)
+
+            logWarning(nfmu.fmu, "No solver keyword detected for NeuralFMU.\nContinuous adjoint method is applied, which requires solving backward in time.\nThis might be not supported by every FMU.", 1)
+            sensealg = InterpolatingAdjoint(; autojacvec=ReverseDiffVJP(true), checkpointing=true)
+        elseif isimplicit(solver)
             @assert !(alg_autodiff(solver) isa AutoForwardDiff) "Implicit solver using `autodiff=true` detected for NeuralFMU.\nThis is currently not supported, please use `autodiff=false` as solver keyword.\nExample: `Rosenbrock23(autodiff=false)` instead of `Rosenbrock23()`."
 
             logWarning(nfmu.fmu, "Implicit solver detected for NeuralFMU.\nContinuous adjoint method is applied, which requires solving backward in time.\nThis might be not supported by every FMU.", 1)
@@ -1677,23 +1682,23 @@ end
 
 """
 
-    train!(loss, params::Union{Flux.Params, Zygote.Params}, data, optim::Flux.Optimise.AbstractOptimiser; gradient::Symbol=:Zygote, cb=nothing, chunk_size::Integer=64, printStep::Bool=false)
+    train!(loss, neuralFMU::Union{ME_NeuralFMU, CS_NeuralFMU}, data, optim; gradient::Symbol=:ReverseDiff, kwargs...)
 
 A function analogous to Flux.train! but with additional features and explicit parameters (faster).
 
 # Arguments
 - `loss` a loss function in the format `loss(p)`
-- `params` a object holding the parameters
+- `neuralFMU` a object holding the neuralFMU with its parameters
 - `data` the training data (or often an iterator)
 - `optim` the optimizer used for training
 
 # Keywords
 - `gradient` a symbol determining the AD-library for gradient computation, available are `:ForwardDiff`, `:Zygote` and :ReverseDiff (default)
-- `cb` a custom callback function that is called after every training step
-- `chunk_size` the chunk size for AD using ForwardDiff (ignored for other AD-methods)
-- `printStep` a boolean determining wheater the gradient min/max is printed after every step (for gradient debugging)
-- `proceed_on_assert` a boolean that determins wheater to throw an ecxeption on error or proceed training and just print the error
-- `numThreads` [WIP]: an integer determining how many threads are used for training (how many gradients are generated in parallel)
+- `cb` a custom callback function that is called after every training step (default `nothing`)
+- `chunk_size` the chunk size for AD using ForwardDiff (ignored for other AD-methods) (default `:auto_fmiflux`)
+- `printStep` a boolean determining wheater the gradient min/max is printed after every step (for gradient debugging) (default `false`)
+- `proceed_on_assert` a boolean that determins wheater to throw an ecxeption on error or proceed training and just print the error (default `false`)
+- `multiThreading`: a boolean that determins if multiple gradients are generated in parallel (default `false`)
 - `multiObjective`: set this if the loss function returns multiple values (multi objective optimization), currently gradients are fired to the optimizer one after another (default `false`)
 """
 function train!(loss, neuralFMU::Union{ME_NeuralFMU, CS_NeuralFMU}, data, optim; gradient::Symbol=:ReverseDiff, kwargs...)
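Taken together with the docstring update, a hedged usage sketch of the revised `train!` keywords; the loss, NeuralFMU, data iterator and optimizer are assumed to be set up as in the examples above, and the callback here is a placeholder:

```julia
# `loss(p)`, `neuralFMU` and `optim` as prepared in the examples
FMIFlux.train!(loss, neuralFMU, Iterators.repeated((), 100), optim;
               gradient=:ReverseDiff,      # default AD backend
               cb=() -> nothing,           # custom callback after every training step
               printStep=false,            # print gradient min/max for debugging
               proceed_on_assert=false,    # throw on error vs. proceed and print (see docstring)
               multiThreading=false,       # generate gradients in parallel
               multiObjective=false)       # set if the loss returns multiple values
```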
