
Commit 24ee1e5 (parent: 177045b)

fixed new train method layout in examples

6 files changed: +9 -11
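The change is the same across all six example notebooks: `FMIFlux.train!` now receives the NeuralFMU itself instead of a parameter collection obtained via `FMIFlux.params`, and the new method collects the trainable parameters internally (as the removed `FMIFlux.params` calls suggest). A minimal before/after sketch of the call pattern, using the names (`neuralFMU`, `lossSum`, `optim`, `callb`) as they appear in the diffs below; the iteration count and keyword arguments vary per notebook:

    # before this commit: the parameter collection was passed explicitly
    paramsNet = FMIFlux.params(neuralFMU)
    FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet))

    # after this commit: the NeuralFMU is passed directly
    FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet))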

examples/src/growing_horizon_ME.ipynb (1 addition, 3 deletions)
@@ -641,10 +641,8 @@
 "outputs": [],
 "source": [
 "# train\n",
-"paramsNet = FMIFlux.params(neuralFMU)\n",
-"\n",
 "optim = Adam()\n",
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet)) "
+"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet)) "
 ]
 },
 {
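Note that this hunk removes the `paramsNet = FMIFlux.params(neuralFMU)` line while the callback closure still references `paramsNet`, so that binding must still exist somewhere earlier in the notebook. A minimal sketch of the updated cell, under the assumption that the parameter collection is still wanted for logging inside `callb`:

    # sketch: keep a handle on the parameters only for the logging callback
    paramsNet = FMIFlux.params(neuralFMU)   # no longer needed by train! itself
    optim = Adam()
    FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1000), optim; cb=()->callb(paramsNet))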

examples/src/juliacon_2023.ipynb (1 addition, 1 deletion)
@@ -740,7 +740,7 @@
 "    \n",
 "    # the actual training\n",
 "    FMIFlux.train!(loss,                          # the loss function for training\n",
-"                   params,                        # the parameters to train\n",
+"                   neuralFMU,                     # the NeuralFMU whose parameters are trained\n",
 "                   Iterators.repeated((), steps), # an iterator repeating `steps` times\n",
 "                   optim;                         # the optimizer used for training\n",
 "                   gradient=:ForwardDiff,         # currently, only ForwardDiff leads to good results for multi-event systems\n",

examples/src/mdpi_2022.ipynb (2 additions, 2 deletions)
@@ -611,14 +611,14 @@
 "\n",
 "# we use ForwardDiff for gradient determination, because the FMU throws multiple events per time instant (this is not supported by reverse-mode AD)\n",
 "# the chunk_size controls the number of forward evaluations of the model (the bigger, the fewer evaluations)\n",
-"FMIFlux.train!(loss, params, Iterators.repeated((), batchLen), optim; gradient=:ForwardDiff, chunk_size=32, cb=updateScheduler) \n",
+"FMIFlux.train!(loss, neuralFMU, Iterators.repeated((), batchLen), optim; gradient=:ForwardDiff, chunk_size=32, cb=updateScheduler) \n",
 "loss_after = batch_loss(params[1])"
 ]
 },
 {
 "attachments": {},
 "cell_type": "markdown",
-"metadata": {},
+"metadata": {},
 "source": [
 "The batch loss (\"AVG\" and \"MAX\") is only updated every 5 steps, as defined in the scheduler. Every 25 steps, we plot the current batch element losses. Please note that we only ran around 100 training steps, so training has not converged yet. But we are curious and want to have a look at the intermediate results. \n",
 "\n",

examples/src/modelica_conference_2021.ipynb (2 additions, 2 deletions)
@@ -891,7 +891,7 @@
 "outputs": [],
 "source": [
 "optim = Adam()\n",
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1), optim; cb=()->callb(paramsNet)) "
+"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1), optim; cb=()->callb(paramsNet)) "
 ]
 },
 {
@@ -950,7 +950,7 @@
 "for run in 1:numRuns\n",
 "    @time for epoch in 1:numEpochs\n",
 "        @info \"Run: $(run)/$(numRuns) Epoch: $(epoch)/$(numEpochs)\"\n",
-"        FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), numIterations), optim; cb=()->callb(paramsNet))\n",
+"        FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), numIterations), optim; cb=()->callb(paramsNet))\n",
 "    end\n",
 "    flush(stderr)\n",
 "    flush(stdout)\n",

examples/src/simple_hybrid_CS.ipynb (1 addition, 1 deletion)
@@ -530,7 +530,7 @@
 "paramsNet = FMIFlux.params(csNeuralFMU)\n",
 "\n",
 "optim = Adam()\n",
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 250), optim; cb=()->callb(paramsNet))"
+"FMIFlux.train!(lossSum, csNeuralFMU, Iterators.repeated((), 250), optim; cb=()->callb(paramsNet))"
 ]
 },
 {

examples/src/simple_hybrid_ME.ipynb (2 additions, 2 deletions)
@@ -500,7 +500,7 @@
 "paramsNet = FMIFlux.params(neuralFMU)\n",
 "\n",
 "optim = Adam()\n",
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 300), optim; cb=()->callb(paramsNet)) "
+"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 300), optim; cb=()->callb(paramsNet)) "
 ]
 },
 {
@@ -563,7 +563,7 @@
 },
 "outputs": [],
 "source": [
-"FMIFlux.train!(lossSum, paramsNet, Iterators.repeated((), 1200), optim; cb=()->callb(paramsNet)) \n",
+"FMIFlux.train!(lossSum, neuralFMU, Iterators.repeated((), 1200), optim; cb=()->callb(paramsNet)) \n",
 "# plot results mass.s\n",
 "solutionAfter = neuralFMU(x₀)\n",
 "Plots.plot!(fig, solutionAfter; stateIndices=1:1, values=false, label=\"NeuralFMU (1500 epochs)\", linewidth=2)\n",
