Fix numba error related to deprecation in numpy 1.25 (#518)
Paul-Saves authored Feb 22, 2024
Parent: 006694e · Commit: 041d931
Showing 7 changed files with 46 additions and 43 deletions.
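
Scope of the change, as read off the hunks below: the behavioral fix appears to be confined to smt/utils/kriging.py, where length-1 hyperparameter arrays are now unwrapped with .item() and the categorical correlation matrix gets an explicit float64 dtype; the hunks in the other six files look like pure formatter reflow (Black 24-style assignment parenthesization) with no semantic change. A minimal sketch of the underlying NumPy 1.25 deprecation follows the smt/utils/kriging.py hunk.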
11 changes: 7 additions & 4 deletions smt/applications/mfk.py
@@ -759,7 +759,9 @@ def predict_variances_all_levels(self, X, is_acting=None):
         sigma2 = self.optimal_par[0]["sigma2"] / self.y_std**2
         MSE[:, 0] = sigma2 * (
             # 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
-            1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0)
+            1
+            - (r_t**2).sum(axis=0)
+            + (u_**2).sum(axis=0)
         )

         # Calculate recursively kriging variance at level i
@@ -843,16 +845,17 @@ def predict_variances_all_levels(self, X, is_acting=None):
                 Q_ = (np.dot((yt - np.dot(Ft, beta)).T, yt - np.dot(Ft, beta)))[0, 0]
                 MSE[:, i] = (
                     # sigma2_rho * MSE[:, i - 1]
-                    +Q_
-                    / (2 * (self.nt_all[i] - p - q))
+                    +Q_ / (2 * (self.nt_all[i] - p - q))
                     # * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
                     * (1 - (r_t**2).sum(axis=0))
                     + sigma2 * (u_**2).sum(axis=0)
                 )
             else:
                 MSE[:, i] = sigma2 * (
                     # 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
-                    1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0)
+                    1
+                    - (r_t**2).sum(axis=0)
+                    + (u_**2).sum(axis=0)
                 )  # + sigma2_rho * MSE[:, i - 1]
             if self.options["propagate_uncertainty"]:
                 MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]
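
For readability, these are the variance expressions the two whitespace-only hunks above encode, written out from the code itself; symbol names follow the source (r_t, u_, Q_, sigma2_rho), the commented-out terms are the noise contributions that remain disabled, and reading p and q as the regression and rho parameter counts is an inference, not stated in the diff:

    \mathrm{MSE}_0 = \sigma^2 \bigl( 1 - \sum_t r_t^2 + \sum_t u_t^2 \bigr)
    \mathrm{MSE}_i = \frac{Q}{2\,(n_i - p - q)} \bigl( 1 - \sum_t r_t^2 \bigr) + \sigma^2 \sum_t u_t^2
    \mathrm{MSE}_i \mathrel{+}= \sigma_\rho^2 \, \mathrm{MSE}_{i-1} \quad \text{(only when propagate\_uncertainty is set)}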
1 change: 0 additions & 1 deletion smt/problems/ndim_step_function.py
@@ -6,7 +6,6 @@
 N-dimensional step function problem.
 """

-
 from smt.utils.options_dictionary import OptionsDictionary
 from smt.problems.problem import Problem
 from smt.problems.tensor_product import TensorProduct
20 changes: 10 additions & 10 deletions smt/surrogate_models/krg_based.py
@@ -716,16 +716,16 @@ def _matrix_data_corr(
         d_cont = d[:, np.logical_not(cat_features)]
         if self.options["corr"] == "squar_sin_exp":
             if self.options["categorical_kernel"] != MixIntKernelType.GOWER:
-                theta_cont_features[
-                    -len([self.design_space.is_cat_mask]) :
-                ] = np.atleast_2d(
-                    np.array([True] * len([self.design_space.is_cat_mask]))
-                ).T
-                theta_cat_features[1][
-                    -len([self.design_space.is_cat_mask]) :
-                ] = np.atleast_2d(
-                    np.array([False] * len([self.design_space.is_cat_mask]))
-                ).T
+                theta_cont_features[-len([self.design_space.is_cat_mask]) :] = (
+                    np.atleast_2d(
+                        np.array([True] * len([self.design_space.is_cat_mask]))
+                    ).T
+                )
+                theta_cat_features[1][-len([self.design_space.is_cat_mask]) :] = (
+                    np.atleast_2d(
+                        np.array([False] * len([self.design_space.is_cat_mask]))
+                    ).T
+                )

         theta_cont = theta[theta_cont_features[:, 0]]
         r_cont = _correlation_types[corr](theta_cont, d_cont)
24 changes: 12 additions & 12 deletions smt/surrogate_models/tests/test_surrogate_model_examples.py
@@ -524,21 +524,21 @@ def test_genn(self):
         genn.options["alpha"] = 0.1  # learning rate that controls optimizer step size
         genn.options["beta1"] = 0.9  # tuning parameter to control ADAM optimization
         genn.options["beta2"] = 0.99  # tuning parameter to control ADAM optimization
-        genn.options[
-            "lambd"
-        ] = 0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
-        genn.options[
-            "gamma"
-        ] = 1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+        genn.options["lambd"] = (
+            0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
+        )
+        genn.options["gamma"] = (
+            1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+        )
         genn.options["deep"] = 2  # number of hidden layers
         genn.options["wide"] = 6  # number of nodes per hidden layer
-        genn.options[
-            "mini_batch_size"
-        ] = 64  # used to divide data into training batches (use for large data sets)
+        genn.options["mini_batch_size"] = (
+            64  # used to divide data into training batches (use for large data sets)
+        )
         genn.options["num_epochs"] = 20  # number of passes through data
-        genn.options[
-            "num_iterations"
-        ] = 100  # number of optimizer iterations per mini-batch
+        genn.options["num_iterations"] = (
+            100  # number of optimizer iterations per mini-batch
+        )
         genn.options["is_print"] = True  # print output (or not)
         load_smt_data(
             genn, xt, yt, dyt_dxt
24 changes: 12 additions & 12 deletions smt/tests/test_all.py
@@ -45,21 +45,21 @@ def genn():
     neural_net.options["alpha"] = 0.1  # learning rate that controls optimizer step size
     neural_net.options["beta1"] = 0.9  # tuning parameter to control ADAM optimization
     neural_net.options["beta2"] = 0.99  # tuning parameter to control ADAM optimization
-    neural_net.options[
-        "lambd"
-    ] = 0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
-    neural_net.options[
-        "gamma"
-    ] = 1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+    neural_net.options["lambd"] = (
+        0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
+    )
+    neural_net.options["gamma"] = (
+        1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+    )
     neural_net.options["deep"] = 2  # number of hidden layers
     neural_net.options["wide"] = 12  # number of nodes per hidden layer
-    neural_net.options[
-        "mini_batch_size"
-    ] = 10000  # used to divide data into training batches (use for large data sets)
+    neural_net.options["mini_batch_size"] = (
+        10000  # used to divide data into training batches (use for large data sets)
+    )
     neural_net.options["num_epochs"] = 25  # number of passes through data
-    neural_net.options[
-        "num_iterations"
-    ] = 100  # number of optimizer iterations per mini-batch
+    neural_net.options["num_iterations"] = (
+        100  # number of optimizer iterations per mini-batch
+    )
     neural_net.options["is_print"] = True
     return neural_net

8 changes: 4 additions & 4 deletions smt/utils/kriging.py
@@ -1548,16 +1548,16 @@ def quadratic(x):
 def matrix_data_corr_levels_cat_matrix(
     i, n_levels, theta_cat, theta_bounds, is_ehh: bool
 ):
-    Theta_mat = np.zeros((n_levels[i], n_levels[i]))
+    Theta_mat = np.zeros((n_levels[i], n_levels[i]), dtype=np.float64)
     L = np.zeros((n_levels[i], n_levels[i]))
     v = 0
     for j in range(n_levels[i]):
         for k in range(n_levels[i] - j):
             if j == k + j:
-                Theta_mat[j, k + j] = 1
+                Theta_mat[j, k + j] = 1.0
             else:
-                Theta_mat[j, k + j] = theta_cat[v]
-                Theta_mat[k + j, j] = theta_cat[v]
+                Theta_mat[j, k + j] = theta_cat[v].item()
+                Theta_mat[k + j, j] = theta_cat[v].item()
                 v = v + 1

     for j in range(n_levels[i]):
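
For context, a minimal sketch of the deprecation this hunk works around; the array shapes are assumptions inferred from the added .item() calls, not taken from the SMT sources. NumPy 1.25 deprecated implicit conversion of an array with ndim > 0 to a scalar, which warns in plain NumPy and, per the commit title, surfaces as an error in the numba-compiled path:

import numpy as np

theta_cat = np.atleast_2d([0.3, 0.7]).T         # assumed (2, 1) hyperparameter column
Theta_mat = np.zeros((2, 2), dtype=np.float64)  # explicit dtype, as in the fix

# theta_cat[0] has shape (1,), so this implicit array-to-scalar conversion emits
# "DeprecationWarning: Conversion of an array with ndim > 0 to a scalar is
# deprecated" on NumPy >= 1.25 and is slated to become an error:
Theta_mat[0, 1] = theta_cat[0]

# The committed spelling extracts the Python float explicitly and is
# forward-compatible:
Theta_mat[0, 1] = theta_cat[0].item()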
1 change: 1 addition & 0 deletions smt/utils/neural_net/data.py
@@ -51,6 +51,7 @@ def load_csv(file=None, inputs=None, outputs=None, partials=None):

         def index(header):
             return headers.index(header)
+
     else:
         raise Exception("The file " + file + " does not exist")

