Skip to content

Commit

Permalink
wip on making NN model work
Browse files Browse the repository at this point in the history
  • Loading branch information
trontrytel committed Feb 16, 2024
1 parent a03f09a commit cb52b5c
Show file tree
Hide file tree
Showing 8 changed files with 329 additions and 437 deletions.
8 changes: 1 addition & 7 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,12 @@ Thermodynamics = "b60c26fb-14c3-4610-9d3e-2d17fe7ff00c"

[weakdeps]
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
DataFramesMeta = "1313f7d8-7da2-5740-9ea0-a2ca25f37964"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
GaussianProcesses = "891a1506-143c-57d2-908e-e1f8e92e6de9"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
MLJ = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7"
MLJFlux = "094fc8d1-fd35-5302-93ea-dabda2abf845"
MLJModels = "d491faf4-2d78-11e9-2867-c94bc002c0b7"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"

[extensions]
EmulatorModelsExt = "DataFrames"
EmulatorModelsExt = ["DataFrames", "Flux", "MLJ", "MLJFlux"]

[compat]
CLIMAParameters = "0.9"
Expand Down
161 changes: 6 additions & 155 deletions ext/EmulatorModelsExt.jl
Original file line number Diff line number Diff line change
@@ -1,173 +1,25 @@
module EmulatorModelsExt

import MLJ
import MLJFlux
import Flux
import DataFrames as DF
import GaussianProcesses
import StatsBase
import Distributions

import CloudMicrophysics as CM
import Thermodynamics as TD
import Thermodynamics.Parameters as TDP
import CloudMicrophysics.AerosolActivation as AA
import CloudMicrophysics.AerosolModel as AM
import CloudMicrophysics.Parameters as CMP

"""
    NNBuilder(layer_sizes, dropout)

MLJFlux builder describing a fully connected network: one hidden `Dense`
layer (with `sigmoid_fast` activation) per entry of `layer_sizes`, each
followed by a `Dropout` layer with the matching rate from `dropout`.
The two vectors must have equal length.

Parametrized on the integer eltype so the field is concretely typed
(`Vector{Integer}` would box every element and defeat specialization);
`NNBuilder([16, 16], [0.1, 0.1])` still works unchanged.
"""
struct NNBuilder{T <: Integer} <: MLJFlux.Builder
    layer_sizes::Vector{T}    # width of each hidden layer, in order
    dropout::Vector{Float64}  # dropout rate applied after each hidden layer
end

"""
    MLJFlux.build(builder::NNBuilder, rng, n_in, n_out)

Construct the `Flux.Chain` described by `builder` for a network with
`n_in` inputs and `n_out` outputs. With no hidden layers the chain is a
single linear `Dense(n_in => n_out)`; otherwise each hidden layer uses
`sigmoid_fast` and is followed by its dropout layer, and the final layer
maps to `n_out` with no activation. Weights are initialized with
`glorot_uniform` seeded from `rng`.

Throws `ArgumentError` if `layer_sizes` and `dropout` differ in length.
"""
function MLJFlux.build(builder::NNBuilder, rng, n_in, n_out)
    # Input validation must not be compiled away, so use an explicit
    # throw rather than `@assert` (which can be disabled).
    length(builder.layer_sizes) == length(builder.dropout) || throw(
        ArgumentError("layer_sizes and dropout must have the same length"),
    )
    num_hidden_layers = length(builder.layer_sizes)
    init = Flux.glorot_uniform(rng)
    layers::Vector{Any} = []
    if num_hidden_layers == 0
        # Degenerate case: a purely linear model.
        push!(layers, Flux.Dense(n_in => n_out, init = init))
    else
        # Input layer into the first hidden layer.
        push!(
            layers,
            Flux.Dense(
                n_in => builder.layer_sizes[1],
                Flux.sigmoid_fast,
                init = init,
            ),
        )
    end
    for i in 1:num_hidden_layers
        # Dropout follows each hidden layer's activation.
        push!(layers, Flux.Dropout(builder.dropout[i]))
        if i == num_hidden_layers
            # Output layer is linear (no activation).
            push!(
                layers,
                Flux.Dense(builder.layer_sizes[i] => n_out, init = init),
            )
        else
            push!(
                layers,
                Flux.Dense(
                    builder.layer_sizes[i] => builder.layer_sizes[i + 1],
                    Flux.sigmoid_fast,
                    init = init,
                ),
            )
        end
    end
    return Flux.Chain(layers...)
end

"""
    GPRegressor

MLJ deterministic model wrapping an ensemble of Gaussian-process
regressors (see the corresponding `MLJ.fit` / `MLJ.predict` methods).
Mutable so hyperparameters can be tuned via MLJ's machinery.
"""
mutable struct GPRegressor <: MLJ.Deterministic
    num_gps::Integer              # number of GPs in the ensemble
    sample_size::Integer          # training rows sampled per GP
    use_ARG_weights::Bool         # weight row sampling by a Normal(0, 0.5) pdf of mode_1_ARG_act_frac
    use_DTC::Bool                 # fit a sparse DTC approximation instead of a full GPA
    sample_size_inducing::Integer # inducing-point rows sampled per GP (DTC only)
end

"""
    MLJ.fit(model::GPRegressor, verbosity, X, y)

Fit `model.num_gps` independent Gaussian processes on random row subsets
of `(X, y)` and return `(gps, nothing, nothing)` per the MLJ fit
contract. Rows are sampled without replacement, optionally weighted by a
`Normal(0, 0.5)` pdf of `X.mode_1_ARG_act_frac` (when
`model.use_ARG_weights`). Each GP is either a sparse DTC approximation
(when `model.use_DTC`, using a second sample of inducing points) or a
full `GPA`, with a constant mean, an ARD squared-exponential kernel, and
hyperparameters optimized in place.
"""
function MLJ.fit(model::GPRegressor, verbosity, X, y)
    n_rows = DF.nrow(X)
    # Hoist the (deterministic) sampling weights out of the loop; the
    # original duplicated this per iteration and per branch.
    weights = if model.use_ARG_weights
        StatsBase.Weights([
            Distributions.pdf(Distributions.Normal(0.0, 0.5), x) for
            x in X.mode_1_ARG_act_frac
        ])
    else
        nothing
    end
    # Draw `k` distinct row indices, weighted when requested.
    sample_rows(k) =
        isnothing(weights) ?
        StatsBase.sample(1:n_rows, k, replace = false) :
        StatsBase.sample(1:n_rows, weights, k, replace = false)
    # `map` gives a concretely-typed result vector instead of the
    # original untyped `gps = []` accumulator.
    gps = map(1:(model.num_gps)) do _
        inds = sample_rows(model.sample_size)
        inds_inducing = sample_rows(model.sample_size_inducing)
        gp = if model.use_DTC
            GaussianProcesses.DTC(
                Matrix(X[inds, :])',
                Matrix(X[inds_inducing, :])',
                y[inds],
                GaussianProcesses.MeanConst(StatsBase.mean(y)),
                GaussianProcesses.SEArd(fill(4.0, DF.ncol(X)), 0.0),
                2.0,
            )
        else
            GaussianProcesses.GPA(
                Matrix(X[inds, :])',
                y[inds],
                GaussianProcesses.MeanConst(StatsBase.mean(y)),
                GaussianProcesses.SEArd(fill(4.0, DF.ncol(X)), 0.0),
                GaussianProcesses.GaussLik(2.0),
            )
        end
        GaussianProcesses.optimize!(gp)
        gp
    end
    return gps, nothing, nothing
end

"""
    MLJ.predict(::GPRegressor, fitresult, Xnew)

Predict with the GP ensemble in `fitresult` on the rows of `Xnew`,
combining the per-GP predictive means with inverse-variance weighting.
Returns a vector with one prediction per row of `Xnew`.
"""
function MLJ.predict(::GPRegressor, fitresult, Xnew)
    # Build the (features × rows) input once, and call predict_f once
    # per GP — the original called it twice (once for means, once for
    # variances), doubling the prediction cost.
    Xmat = Matrix(Xnew)'
    preds = [GaussianProcesses.predict_f(gp, Xmat) for gp in fitresult]
    means = reduce(hcat, [p[1] for p in preds])
    variances = reduce(hcat, [p[2] for p in preds])
    # Inverse-variance weighted average across the ensemble (columns).
    return (sum(
        means ./ variances,
        dims = 2,
    ) ./ sum(1.0 ./ variances, dims = 2))[
        :,
        1,
    ]
end


"""
MLEmulatedAerosolActivation
The type for aerosol activation schemes that are emulated with an ML model
"""
struct MLAerosolActivationParameters <: CMP.ParametersType
ap::CMP.AerosolActivationParameters
machine::MLJ.Machine
end

"""
    MLEmulatedAerosolActivation(FT, emulator_filepath)

Load a serialized MLJ machine from `emulator_filepath` and wrap it,
together with `AerosolActivationParameters` of float type `FT`, in an
`MLAerosolActivationParameters`.
"""
function MLEmulatedAerosolActivation(
    ::Type{FT},
    emulator_filepath::String,
) where {FT}
    machine = MLJ.machine(emulator_filepath)
    ap = CMP.AerosolActivationParameters(FT)
    # Bug fix: the original returned `MLEmulatedAerosolActivation(ap, machine)`,
    # a recursive call with no matching 2-argument method; construct the
    # parameter struct instead.
    return MLAerosolActivationParameters(ap, machine)
end

function AA.N_activated_per_mode(
ml::MLAerosolActivationParameters,
machine::MLJ.Machine,
ap::CMP.AerosolActivationParameters,
aip::CMP.AirProperties,
tps::TDP.ThermodynamicsParameters,
T::FT,
p::FT,
w::FT,
q::TD.PhasePartition{FT},
) where {FT <: Real}
hygro = mean_hygroscopicity_parameter(ml.ap, ad)
hygro = mean_hygroscopicity_parameter(ap, ad)
return ntuple(Val(AM.n_modes(ad))) do i
# Model predicts activation of the first mode. So, swap each mode
# with the first mode repeatedly to predict all activations.
Expand All @@ -187,8 +39,7 @@ function AA.N_activated_per_mode(
:initial_pressure => p,
)
X = DF.DataFrame([merge(reduce(merge, per_mode_data), additional_data)])
max(FT(0), min(FT(1), MLJ.predict(ml.machine, X)[1])) *
ad.Modes[i].N
max(FT(0), min(FT(1), MLJ.predict(machine, X)[1])) * ad.Modes[i].N
end
end

Expand Down
Binary file added ml_test/2modal_nn_machine_naive.jls
Binary file not shown.
Loading

0 comments on commit cb52b5c

Please sign in to comment.