From 041d93188698f8b2261a0ec2e0a5e8d3f2885f51 Mon Sep 17 00:00:00 2001
From: Saves Paul
Date: Thu, 22 Feb 2024 16:42:46 +0100
Subject: [PATCH] Fix numba error related to deprecation in numpy 1.25 (#518)

---
 smt/applications/mfk.py                    | 11 +++++----
 smt/problems/ndim_step_function.py         |  1 -
 smt/surrogate_models/krg_based.py          | 20 ++++++++--------
 .../tests/test_surrogate_model_examples.py | 24 +++++++++----------
 smt/tests/test_all.py                      | 24 +++++++++----------
 smt/utils/kriging.py                       |  8 +++----
 smt/utils/neural_net/data.py               |  1 +
 7 files changed, 46 insertions(+), 43 deletions(-)

diff --git a/smt/applications/mfk.py b/smt/applications/mfk.py
index fb6846af9..37ff47e66 100644
--- a/smt/applications/mfk.py
+++ b/smt/applications/mfk.py
@@ -759,7 +759,9 @@ def predict_variances_all_levels(self, X, is_acting=None):
             sigma2 = self.optimal_par[0]["sigma2"] / self.y_std**2
             MSE[:, 0] = sigma2 * (
                 # 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
-                1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0)
+                1
+                - (r_t**2).sum(axis=0)
+                + (u_**2).sum(axis=0)
             )
 
         # Calculate recursively kriging variance at level i
@@ -843,8 +845,7 @@ def predict_variances_all_levels(self, X, is_acting=None):
                 Q_ = (np.dot((yt - np.dot(Ft, beta)).T, yt - np.dot(Ft, beta)))[0, 0]
                 MSE[:, i] = (
                     # sigma2_rho * MSE[:, i - 1]
-                    +Q_
-                    / (2 * (self.nt_all[i] - p - q))
+                    +Q_ / (2 * (self.nt_all[i] - p - q))
                     # * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
                     * (1 - (r_t**2).sum(axis=0))
                     + sigma2 * (u_**2).sum(axis=0)
@@ -852,7 +853,9 @@ def predict_variances_all_levels(self, X, is_acting=None):
             else:
                 MSE[:, i] = sigma2 * (
                     # 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
-                    1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0)
+                    1
+                    - (r_t**2).sum(axis=0)
+                    + (u_**2).sum(axis=0)
                 )  # + sigma2_rho * MSE[:, i - 1]
             if self.options["propagate_uncertainty"]:
                 MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]
diff --git a/smt/problems/ndim_step_function.py b/smt/problems/ndim_step_function.py
index 38de2bb73..e3ec09ed9 100644
--- a/smt/problems/ndim_step_function.py
+++ b/smt/problems/ndim_step_function.py
@@ -6,7 +6,6 @@
 
 N-dimensional step function problem.
 """
-
 from smt.utils.options_dictionary import OptionsDictionary
 from smt.problems.problem import Problem
 from smt.problems.tensor_product import TensorProduct
diff --git a/smt/surrogate_models/krg_based.py b/smt/surrogate_models/krg_based.py
index 8a547033b..befe5bca3 100644
--- a/smt/surrogate_models/krg_based.py
+++ b/smt/surrogate_models/krg_based.py
@@ -716,16 +716,16 @@ def _matrix_data_corr(
             d_cont = d[:, np.logical_not(cat_features)]
             if self.options["corr"] == "squar_sin_exp":
                 if self.options["categorical_kernel"] != MixIntKernelType.GOWER:
-                    theta_cont_features[
-                        -len([self.design_space.is_cat_mask]) :
-                    ] = np.atleast_2d(
-                        np.array([True] * len([self.design_space.is_cat_mask]))
-                    ).T
-                    theta_cat_features[1][
-                        -len([self.design_space.is_cat_mask]) :
-                    ] = np.atleast_2d(
-                        np.array([False] * len([self.design_space.is_cat_mask]))
-                    ).T
+                    theta_cont_features[-len([self.design_space.is_cat_mask]) :] = (
+                        np.atleast_2d(
+                            np.array([True] * len([self.design_space.is_cat_mask]))
+                        ).T
+                    )
+                    theta_cat_features[1][-len([self.design_space.is_cat_mask]) :] = (
+                        np.atleast_2d(
+                            np.array([False] * len([self.design_space.is_cat_mask]))
+                        ).T
+                    )
 
             theta_cont = theta[theta_cont_features[:, 0]]
             r_cont = _correlation_types[corr](theta_cont, d_cont)
diff --git a/smt/surrogate_models/tests/test_surrogate_model_examples.py b/smt/surrogate_models/tests/test_surrogate_model_examples.py
index c311bba31..d374a5129 100644
--- a/smt/surrogate_models/tests/test_surrogate_model_examples.py
+++ b/smt/surrogate_models/tests/test_surrogate_model_examples.py
@@ -524,21 +524,21 @@ def test_genn(self):
         genn.options["alpha"] = 0.1  # learning rate that controls optimizer step size
         genn.options["beta1"] = 0.9  # tuning parameter to control ADAM optimization
         genn.options["beta2"] = 0.99  # tuning parameter to control ADAM optimization
-        genn.options[
-            "lambd"
-        ] = 0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
-        genn.options[
-            "gamma"
-        ] = 1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+        genn.options["lambd"] = (
+            0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
+        )
+        genn.options["gamma"] = (
+            1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+        )
         genn.options["deep"] = 2  # number of hidden layers
         genn.options["wide"] = 6  # number of nodes per hidden layer
-        genn.options[
-            "mini_batch_size"
-        ] = 64  # used to divide data into training batches (use for large data sets)
+        genn.options["mini_batch_size"] = (
+            64  # used to divide data into training batches (use for large data sets)
+        )
         genn.options["num_epochs"] = 20  # number of passes through data
-        genn.options[
-            "num_iterations"
-        ] = 100  # number of optimizer iterations per mini-batch
+        genn.options["num_iterations"] = (
+            100  # number of optimizer iterations per mini-batch
+        )
         genn.options["is_print"] = True  # print output (or not)
         load_smt_data(
             genn, xt, yt, dyt_dxt
diff --git a/smt/tests/test_all.py b/smt/tests/test_all.py
index 72856c1a8..57615d84a 100644
--- a/smt/tests/test_all.py
+++ b/smt/tests/test_all.py
@@ -45,21 +45,21 @@ def genn():
     neural_net.options["alpha"] = 0.1  # learning rate that controls optimizer step size
     neural_net.options["beta1"] = 0.9  # tuning parameter to control ADAM optimization
     neural_net.options["beta2"] = 0.99  # tuning parameter to control ADAM optimization
-    neural_net.options[
-        "lambd"
-    ] = 0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
-    neural_net.options[
-        "gamma"
-    ] = 1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+    neural_net.options["lambd"] = (
+        0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
+    )
+    neural_net.options["gamma"] = (
+        1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+    )
     neural_net.options["deep"] = 2  # number of hidden layers
     neural_net.options["wide"] = 12  # number of nodes per hidden layer
-    neural_net.options[
-        "mini_batch_size"
-    ] = 10000  # used to divide data into training batches (use for large data sets)
+    neural_net.options["mini_batch_size"] = (
+        10000  # used to divide data into training batches (use for large data sets)
+    )
     neural_net.options["num_epochs"] = 25  # number of passes through data
-    neural_net.options[
-        "num_iterations"
-    ] = 100  # number of optimizer iterations per mini-batch
+    neural_net.options["num_iterations"] = (
+        100  # number of optimizer iterations per mini-batch
+    )
     neural_net.options["is_print"] = True
 
     return neural_net
diff --git a/smt/utils/kriging.py b/smt/utils/kriging.py
index ea479bdb5..f2325873d 100644
--- a/smt/utils/kriging.py
+++ b/smt/utils/kriging.py
@@ -1548,16 +1548,16 @@ def quadratic(x):
 def matrix_data_corr_levels_cat_matrix(
     i, n_levels, theta_cat, theta_bounds, is_ehh: bool
 ):
-    Theta_mat = np.zeros((n_levels[i], n_levels[i]))
+    Theta_mat = np.zeros((n_levels[i], n_levels[i]), dtype=np.float64)
     L = np.zeros((n_levels[i], n_levels[i]))
     v = 0
     for j in range(n_levels[i]):
         for k in range(n_levels[i] - j):
             if j == k + j:
-                Theta_mat[j, k + j] = 1
+                Theta_mat[j, k + j] = 1.0
             else:
-                Theta_mat[j, k + j] = theta_cat[v]
-                Theta_mat[k + j, j] = theta_cat[v]
+                Theta_mat[j, k + j] = theta_cat[v].item()
+                Theta_mat[k + j, j] = theta_cat[v].item()
                 v = v + 1
 
     for j in range(n_levels[i]):
diff --git a/smt/utils/neural_net/data.py b/smt/utils/neural_net/data.py
index 84278e659..a0548dba7 100644
--- a/smt/utils/neural_net/data.py
+++ b/smt/utils/neural_net/data.py
@@ -51,6 +51,7 @@ def load_csv(file=None, inputs=None, outputs=None, partials=None):
 
         def index(header):
            return headers.index(header)
+
    else:
        raise Exception("The file " + file + " does not exist")
 
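
Note on the substantive change (a reviewer's sketch, not part of the applied diff): the smt/utils/kriging.py hunk replaces direct assignment of theta_cat[v], a size-1 array, with theta_cat[v].item(); the other hunks are formatting and explicit-dtype adjustments. NumPy 1.25 deprecates implicit conversion of an array with ndim > 0 to a Python scalar, and .item() performs that extraction explicitly. The snippet below is a minimal standalone illustration; the shapes of theta_cat and Theta_mat are assumptions chosen for the example, not taken from SMT.

    import numpy as np

    # NumPy >= 1.25 warns on implicit size-1-array-to-scalar conversion:
    v = np.array([0.25])
    x = float(v)   # DeprecationWarning on NumPy >= 1.25, planned to become an error
    x = v.item()   # explicit scalar extraction, no warning

    # Illustrative stand-ins for the patched assignments (shapes assumed):
    theta_cat = np.atleast_2d([0.1, 0.2, 0.3]).T   # theta_cat[i] has shape (1,)
    Theta_mat = np.zeros((2, 2), dtype=np.float64)
    Theta_mat[0, 1] = theta_cat[1].item()          # assign a plain Python scalar
    Theta_mat[1, 0] = theta_cat[1].item()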